From 772e92eca9ff56f01a390c38fca5d6357e389188 Mon Sep 17 00:00:00 2001 From: Olaf Kilian Date: Sun, 3 May 2015 17:13:34 +0200 Subject: [PATCH 01/64] Add docker_login module - Ansible version of "docker login" CLI command - Persists Docker registry authentification in .dockercfg (only login once - no need to specify credentials over and over again anymore) - Works for all other docker-py based modules (docker, docker_images) as well as the Docker CLI client --- cloud/docker/__init__.py | 0 cloud/docker/docker_login.py | 243 +++++++++++++++++++++++++++++++++++ 2 files changed, 243 insertions(+) create mode 100644 cloud/docker/__init__.py create mode 100644 cloud/docker/docker_login.py diff --git a/cloud/docker/__init__.py b/cloud/docker/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cloud/docker/docker_login.py b/cloud/docker/docker_login.py new file mode 100644 index 00000000000..023cbda73a9 --- /dev/null +++ b/cloud/docker/docker_login.py @@ -0,0 +1,243 @@ +#!/usr/bin/python +# + +# (c) 2015, Olaf Kilian +# +# This file is part of Ansible +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . + +###################################################################### + +DOCUMENTATION = ''' +--- +module: docker_login +author: Olaf Kilian +version_added: "1.9" +short_description: Manage Docker registry logins +description: + - Ansible version of the "docker login" CLI command. 
+ - This module allows you to login to a Docker registry without directly pulling an image or performing any other actions. + - It will write your login credentials to your local .dockercfg file that is compatible to the Docker CLI client as well as docker-py and all other Docker related modules that are based on docker-py. +options: + registry: + description: + - URL of the registry, for example: https://index.docker.io/v1/ + required: true + default: null + aliases: [] + username: + description: + - The username for the registry account + required: true + default: null + aliases: [] + password: + description: + - The plaintext password for the registry account + required: true + default: null + aliases: [] + email: + description: + - The email address for the registry account + required: false + default: None + aliases: [] + reauth: + description: + - Whether refresh existing authentication on the Docker server (boolean) + required: false + default: false + aliases: [] + dockercfg_path: + description: + - Use a custom path for the .dockercfg file + required: false + default: ~/.dockercfg + aliases: [] + docker_url: + descriptions: + - Refers to the protocol+hostname+port where the Docker server is hosted + required: false + default: unix://var/run/docker.sock + aliases: [] + timeout: + description: + - The HTTP request timeout in seconds + required: false + default: 600 + aliases: [] + +requirements: [ "docker-py" ] +''' + +EXAMPLES = ''' +Login to a Docker registry without performing any other action. 
Make sure that the user you are using is either in the docker group which owns the Docker socket or use sudo to perform login actions: + +- name: login to DockerHub remote registry using your account + docker_login: + username: docker + password: rekcod + email: docker@docker.io + +- name: login to private Docker remote registry and force reauthentification + docker_login: + registry: https://your.private.registry.io/v1/ + username: yourself + password: secrets3 + reauth: yes + +- name: login to DockerHub remote registry using a custom dockercfg file location + docker_login: + username: docker + password: rekcod + email: docker@docker.io + dockercfg_path: /tmp/.mydockercfg + +''' + +try: + import os.path + import sys + import json + import base64 + import docker.client + from requests.exceptions import * + from urlparse import urlparse +except ImportError, e: + print "failed=True msg='failed to import python module: %s'" % e + sys.exit(1) + +try: + from docker.errors import APIError as DockerAPIError +except ImportError: + from docker.client import APIError as DockerAPIError + +class DockerLoginManager: + + def __init__(self, module): + + self.module = module + self.registry = self.module.params.get('registry') + self.username = self.module.params.get('username') + self.password = self.module.params.get('password') + self.email = self.module.params.get('email') + self.reauth = self.module.params.get('reauth') + self.dockercfg_path = os.path.expanduser(self.module.params.get('dockercfg_path')) + + docker_url = urlparse(module.params.get('docker_url')) + self.client = docker.Client(base_url=docker_url.geturl(), timeout=module.params.get('timeout')) + + self.changed = False + self.response = False + self.log = list() + + def login(self): + + if self.reauth: + self.log.append("Enforcing reauthentification") + + # Connect to registry and login if not already logged in or reauth is enforced. 
+ try: + self.response = self.client.login( + self.username, + password=self.password, + email=self.email, + registry=self.registry, + reauth=self.reauth, + dockercfg_path=self.dockercfg_path + ) + except Exception as e: + self.module.fail_json(msg="failed to login to the remote registry", error=repr(e)) + + # Get status from registry response. + if self.response.has_key("Status"): + self.log.append(self.response["Status"]) + if self.response["Status"] == "Login Succeeded": + self.changed = True + else: + self.log.append("Already Authentificated") + + # Update the dockercfg if changed but not failed. + if self.has_changed(): + self.update_dockercfg() + + # This is what the underlaying docker-py unfortunately doesn't do (yet). + def update_dockercfg(self): + + # Create dockercfg file if it does not exist. + if not os.path.exists(self.dockercfg_path): + open(self.dockercfg_path, "w") + self.log.append("Created new Docker config file at %s" % self.dockercfg_path) + else: + self.log.append("Updated existing Docker config file at %s" % self.dockercfg_path) + + # Get existing dockercfg into a dict. + try: + docker_config = json.load(open(self.dockercfg_path, "r")) + except ValueError: + docker_config = dict() + if not docker_config.has_key(self.registry): + docker_config[self.registry] = dict() + docker_config[self.registry] = dict( + auth = base64.b64encode(self.username + b':' + self.password), + email = self.email + ) + + # Write updated dockercfg to dockercfg file. + try: + json.dump(docker_config, open(self.dockercfg_path, "w"), indent=4, sort_keys=True) + except Exception as e: + self.module.fail_json(msg="failed to write auth details to file", error=repr(e)) + + # Compatible to docker-py auth.decode_docker_auth() + def encode_docker_auth(self, auth): + s = base64.b64decode(auth) + login, pwd = s.split(b':', 1) + return login.decode('ascii'), pwd.decode('ascii') + + def get_msg(self): + return ". 
".join(self.log) + + def has_changed(self): + return self.changed + +def main(): + + module = AnsibleModule( + argument_spec = dict( + registry = dict(required=True, default=None), + username = dict(required=True, default=None), + password = dict(required=True, default=None), + email = dict(required=False, default=None), + reauth = dict(required=False, default=False, type='bool'), + dockercfg_path = dict(required=False, default='~/.dockercfg'), + docker_url = dict(default='unix://var/run/docker.sock'), + timeout = dict(default=10, type='int') + ) + ) + + try: + manager = DockerLoginManager(module) + manager.login() + module.exit_json(changed=manager.has_changed(), msg=manager.get_msg(), registry=manager.registry) + + except Exception as e: + module.fail_json(msg="Module execution has failed due to an unexpected error", error=repr(e)) + +# import module snippets +from ansible.module_utils.basic import * + +main() From e362583abdf4f1ebd3bb44683a9aaf678b2456db Mon Sep 17 00:00:00 2001 From: xiaclo Date: Mon, 4 May 2015 14:08:39 +1000 Subject: [PATCH 02/64] Allow NPM to update packages --- packaging/language/npm.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/language/npm.py b/packaging/language/npm.py index 8407589116a..6f3767988e1 100644 --- a/packaging/language/npm.py +++ b/packaging/language/npm.py @@ -250,7 +250,7 @@ def main(): outdated = npm.list_outdated() if len(missing) or len(outdated): changed = True - npm.install() + npm.update() else: #absent installed, missing = npm.list() if name in installed: From feb20eeadd62ed683870c033394037fa99437848 Mon Sep 17 00:00:00 2001 From: Olaf Kilian Date: Wed, 6 May 2015 22:28:36 +0200 Subject: [PATCH 03/64] Update PR based on review from @resmo --- cloud/docker/docker_login.py | 55 +++++++++++++++++------------------- 1 file changed, 26 insertions(+), 29 deletions(-) diff --git a/cloud/docker/docker_login.py b/cloud/docker/docker_login.py index 023cbda73a9..09d9d599432 100644 --- 
a/cloud/docker/docker_login.py +++ b/cloud/docker/docker_login.py @@ -24,7 +24,7 @@ DOCUMENTATION = ''' --- module: docker_login author: Olaf Kilian -version_added: "1.9" +version_added: "2.0" short_description: Manage Docker registry logins description: - Ansible version of the "docker login" CLI command. @@ -35,50 +35,38 @@ options: description: - URL of the registry, for example: https://index.docker.io/v1/ required: true - default: null - aliases: [] username: description: - The username for the registry account required: true - default: null - aliases: [] password: description: - The plaintext password for the registry account required: true - default: null - aliases: [] email: description: - The email address for the registry account required: false - default: None - aliases: [] reauth: description: - Whether refresh existing authentication on the Docker server (boolean) required: false default: false - aliases: [] dockercfg_path: description: - Use a custom path for the .dockercfg file required: false default: ~/.dockercfg - aliases: [] docker_url: descriptions: - Refers to the protocol+hostname+port where the Docker server is hosted required: false default: unix://var/run/docker.sock - aliases: [] timeout: description: - The HTTP request timeout in seconds required: false default: 600 - aliases: [] requirements: [ "docker-py" ] ''' @@ -108,22 +96,24 @@ Login to a Docker registry without performing any other action. 
Make sure that t ''' +import os.path +import sys +import json +import base64 +from urlparse import urlparse + try: - import os.path - import sys - import json - import base64 import docker.client - from requests.exceptions import * - from urlparse import urlparse + from docker.errors import APIError as DockerAPIError + has_lib_docker = True except ImportError, e: - print "failed=True msg='failed to import python module: %s'" % e - sys.exit(1) + has_lib_docker = False try: - from docker.errors import APIError as DockerAPIError -except ImportError: - from docker.client import APIError as DockerAPIError + from requests.exceptions import * + has_lib_requests_execeptions = True +except ImportError, e: + has_lib_requests_execeptions = False class DockerLoginManager: @@ -171,7 +161,7 @@ class DockerLoginManager: self.log.append("Already Authentificated") # Update the dockercfg if changed but not failed. - if self.has_changed(): + if self.has_changed() and not self.module.check_mode: self.update_dockercfg() # This is what the underlaying docker-py unfortunately doesn't do (yet). 
@@ -218,17 +208,24 @@ def main(): module = AnsibleModule( argument_spec = dict( - registry = dict(required=True, default=None), - username = dict(required=True, default=None), - password = dict(required=True, default=None), + registry = dict(required=True), + username = dict(required=True), + password = dict(required=True), email = dict(required=False, default=None), reauth = dict(required=False, default=False, type='bool'), dockercfg_path = dict(required=False, default='~/.dockercfg'), docker_url = dict(default='unix://var/run/docker.sock'), timeout = dict(default=10, type='int') - ) + ), + supports_check_mode=True ) + if not has_lib_docker: + module.fail_json(msg="python library docker-py required: pip install docker-py==1.1.0") + + if not has_lib_requests_execeptions: + module.fail_json(msg="python library requests required: pip install requests") + try: manager = DockerLoginManager(module) manager.login() From 7a2a75f6c0474edfdf9a74f5b2f6072d5dee6fd5 Mon Sep 17 00:00:00 2001 From: Olaf Kilian Date: Wed, 6 May 2015 22:33:31 +0200 Subject: [PATCH 04/64] Remove registry from exit_json because misleading docker-py is not returning the name of the registry if already logged in. It can differ from the registry specified by the user, which was return as registry. 
--- cloud/docker/docker_login.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/docker/docker_login.py b/cloud/docker/docker_login.py index 09d9d599432..db8fa906320 100644 --- a/cloud/docker/docker_login.py +++ b/cloud/docker/docker_login.py @@ -229,7 +229,7 @@ def main(): try: manager = DockerLoginManager(module) manager.login() - module.exit_json(changed=manager.has_changed(), msg=manager.get_msg(), registry=manager.registry) + module.exit_json(changed=manager.has_changed(), msg=manager.get_msg()) except Exception as e: module.fail_json(msg="Module execution has failed due to an unexpected error", error=repr(e)) From f5e7ce00e7ab5b5331d43fb3d81649f2124efbf4 Mon Sep 17 00:00:00 2001 From: Olaf Kilian Date: Wed, 6 May 2015 22:43:28 +0200 Subject: [PATCH 05/64] Extract only the hostname part from self.registry This is needed for update_dockercfg() to register only the host part of a specified registry URL in the .dockercfg. --- cloud/docker/docker_login.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cloud/docker/docker_login.py b/cloud/docker/docker_login.py index db8fa906320..be5a46977a7 100644 --- a/cloud/docker/docker_login.py +++ b/cloud/docker/docker_login.py @@ -139,6 +139,10 @@ class DockerLoginManager: if self.reauth: self.log.append("Enforcing reauthentification") + # Extract hostname part from self.registry if url was specified. + registry_url = urlparse(self.registry) + self.registry = registry_url.netloc or registry_url.path + # Connect to registry and login if not already logged in or reauth is enforced. 
try: self.response = self.client.login( From 3d3efa3614bd69286d538914f72b1f60b066f07a Mon Sep 17 00:00:00 2001 From: Olaf Kilian Date: Thu, 7 May 2015 09:15:04 +0200 Subject: [PATCH 06/64] Removed unused import of sys module --- cloud/docker/docker_login.py | 1 - 1 file changed, 1 deletion(-) diff --git a/cloud/docker/docker_login.py b/cloud/docker/docker_login.py index be5a46977a7..a6f119168bc 100644 --- a/cloud/docker/docker_login.py +++ b/cloud/docker/docker_login.py @@ -97,7 +97,6 @@ Login to a Docker registry without performing any other action. Make sure that t ''' import os.path -import sys import json import base64 from urlparse import urlparse From 30fa6e3ea4f5177d6d8d3732d26452792264777e Mon Sep 17 00:00:00 2001 From: Olaf Kilian Date: Thu, 7 May 2015 09:35:40 +0200 Subject: [PATCH 07/64] Added default email address --- cloud/docker/docker_login.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cloud/docker/docker_login.py b/cloud/docker/docker_login.py index a6f119168bc..b515b414c5a 100644 --- a/cloud/docker/docker_login.py +++ b/cloud/docker/docker_login.py @@ -47,6 +47,7 @@ options: description: - The email address for the registry account required: false + default: anonymous@localhost.local reauth: description: - Whether refresh existing authentication on the Docker server (boolean) @@ -214,7 +215,7 @@ def main(): registry = dict(required=True), username = dict(required=True), password = dict(required=True), - email = dict(required=False, default=None), + email = dict(required=False, default='anonymous@localhost.local'), reauth = dict(required=False, default=False, type='bool'), dockercfg_path = dict(required=False, default='~/.dockercfg'), docker_url = dict(default='unix://var/run/docker.sock'), From 3bcb24e6569a446c489f6058239db42adbf73a24 Mon Sep 17 00:00:00 2001 From: Olaf Kilian Date: Thu, 7 May 2015 09:36:32 +0200 Subject: [PATCH 08/64] Added more meaningful fail messages on Docker API --- cloud/docker/docker_login.py | 2 ++ 
1 file changed, 2 insertions(+) diff --git a/cloud/docker/docker_login.py b/cloud/docker/docker_login.py index b515b414c5a..1292fe38909 100644 --- a/cloud/docker/docker_login.py +++ b/cloud/docker/docker_login.py @@ -153,6 +153,8 @@ class DockerLoginManager: reauth=self.reauth, dockercfg_path=self.dockercfg_path ) + except DockerAPIError as e: + self.module.fail_json(msg="Docker API Error: %s" % e.explanation) except Exception as e: self.module.fail_json(msg="failed to login to the remote registry", error=repr(e)) From 65c41451f08bc12a1e19b9f9f11cb1518ac7b8a1 Mon Sep 17 00:00:00 2001 From: Tom Bamford Date: Sun, 13 Sep 2015 15:37:23 +0000 Subject: [PATCH 09/64] Ensure tag values get updated in ec2_vpc_subnet --- cloud/amazon/ec2_vpc_subnet.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_vpc_subnet.py b/cloud/amazon/ec2_vpc_subnet.py index 45e84f66939..ec94459f4b1 100644 --- a/cloud/amazon/ec2_vpc_subnet.py +++ b/cloud/amazon/ec2_vpc_subnet.py @@ -163,7 +163,7 @@ def ensure_tags(vpc_conn, resource_id, tags, add_only, check_mode): if to_delete and not add_only: vpc_conn.delete_tags(resource_id, to_delete, dry_run=check_mode) - to_add = dict((k, tags[k]) for k in tags if k not in cur_tags) + to_add = dict((k, tags[k]) for k in tags if k not in cur_tags or cur_tags[k] != tags[k]) if to_add: vpc_conn.create_tags(resource_id, to_add, dry_run=check_mode) From 3b5c7f293635c6b436586aa9670059092118a852 Mon Sep 17 00:00:00 2001 From: Indrajit Raychaudhuri Date: Sun, 4 Oct 2015 21:03:11 -0500 Subject: [PATCH 10/64] homebrew: Add explicit documentation for 'path' argument (with expected default) In Homebew, a formula is installed in a location relative to the actual `brew` command. The documentation clarifies that. Additionally, removed redundant 'path' reconstruction in multiple places. 
--- packaging/os/homebrew.py | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/packaging/os/homebrew.py b/packaging/os/homebrew.py index d79a118b932..3607080d0e2 100644 --- a/packaging/os/homebrew.py +++ b/packaging/os/homebrew.py @@ -37,6 +37,11 @@ options: - name of package to install/remove required: false default: None + path: + description: + - ':' separated list of paths to search for 'brew' executable. Since A package (I(formula) in homebrew parlance) location is prefixed relative to the actual path of I(brew) command, providing an alternative I(brew) path enables managing different set of packages in an alternative location in the system. + required: false + default: '/usr/local/bin' state: description: - state of the package @@ -303,7 +308,7 @@ class Homebrew(object): return package # /class properties -------------------------------------------- }}} - def __init__(self, module, path=None, packages=None, state=None, + def __init__(self, module, path, packages=None, state=None, update_homebrew=False, upgrade_all=False, install_options=None): if not install_options: @@ -329,13 +334,8 @@ class Homebrew(object): setattr(self, key, val) def _prep(self): - self._prep_path() self._prep_brew_path() - def _prep_path(self): - if not self.path: - self.path = ['/usr/local/bin'] - def _prep_brew_path(self): if not self.module: self.brew_path = None @@ -770,7 +770,10 @@ def main(): required=False, type='list', ), - path=dict(required=False), + path=dict( + default="/usr/local/bin", + required=False, + ), state=dict( default="present", choices=[ @@ -808,8 +811,6 @@ def main(): path = p['path'] if path: path = path.split(':') - else: - path = ['/usr/local/bin'] state = p['state'] if state in ('present', 'installed'): From c6aeaf00b1bf5a42b88415ae17a88c6c6095bb21 Mon Sep 17 00:00:00 2001 From: Indrajit Raychaudhuri Date: Sun, 4 Oct 2015 23:04:46 -0500 Subject: [PATCH 11/64] homebrew: Aditional examples for documentation --- 
packaging/os/homebrew.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/packaging/os/homebrew.py b/packaging/os/homebrew.py index 3607080d0e2..5225e8091c5 100644 --- a/packaging/os/homebrew.py +++ b/packaging/os/homebrew.py @@ -69,10 +69,22 @@ options: notes: [] ''' EXAMPLES = ''' +# Install formula foo with 'brew' in default path (C(/usr/local/bin)) - homebrew: name=foo state=present + +# Install formula foo with 'brew' in alternate path C(/my/other/location/bin) +- homebrew: name=foo path=/my/other/location/bin state=present + +# Update homebrew first and install formula foo with 'brew' in default path - homebrew: name=foo state=present update_homebrew=yes + +# Update homebrew first and upgrade formula foo to latest available with 'brew' in default path - homebrew: name=foo state=latest update_homebrew=yes + +# Update homebrew and upgrade all packages - homebrew: update_homebrew=yes upgrade_all=yes + +# Miscellaneous other examples - homebrew: name=foo state=head - homebrew: name=foo state=linked - homebrew: name=foo state=absent From 592e30085144241b34781b3acedc861069f7764d Mon Sep 17 00:00:00 2001 From: YAEGASHI Takeshi Date: Thu, 13 Aug 2015 20:09:55 +0900 Subject: [PATCH 12/64] New module: blockinfile --- files/blockinfile.py | 292 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 292 insertions(+) create mode 100644 files/blockinfile.py diff --git a/files/blockinfile.py b/files/blockinfile.py new file mode 100644 index 00000000000..a8499547639 --- /dev/null +++ b/files/blockinfile.py @@ -0,0 +1,292 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2014, 2015 YAEGASHI Takeshi +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +import re +import os +import tempfile + +DOCUMENTATION = """ +--- +module: blockinfile +author: + - 'YAEGASHI Takeshi (@yaegashi)' +extends_documentation_fragment: + - files + - validate +short_description: Insert/update/remove a text block + surrounded by marker lines. +version_added: '2.0' +description: + - This module will insert/update/remove a block of multi-line text + surrounded by customizable marker lines. +notes: + - This module supports check mode. +options: + dest: + aliases: [ name, destfile ] + required: true + description: + - The file to modify. + state: + required: false + choices: [ present, absent ] + default: present + description: + - Whether the block should be there or not. + marker: + required: false + default: '# {mark} ANSIBLE MANAGED BLOCK' + description: + - The marker line template. + "{mark}" will be replaced with "BEGIN" or "END". + block: + aliases: [ content ] + required: false + default: '' + description: + - The text to insert inside the marker lines. + If it's missing or an empty string, + the block will be removed as if C(state) were specified to C(absent). + insertafter: + required: false + default: EOF + description: + - If specified, the block will be inserted after the last match of + specified regular expression. A special value is available; C(EOF) for + inserting the block at the end of the file. If specified regular + expresion has no matches, C(EOF) will be used instead. + choices: [ 'EOF', '*regex*' ] + insertbefore: + required: false + default: None + description: + - If specified, the block will be inserted before the last match of + specified regular expression. 
A special value is available; C(BOF) for + inserting the block at the beginning of the file. If specified regular + expresion has no matches, the block will be inserted at the end of the + file. + choices: [ 'BOF', '*regex*' ] + create: + required: false + default: 'no' + choices: [ 'yes', 'no' ] + description: + - Create a new file if it doesn't exist. + backup: + required: false + default: 'no' + choices: [ 'yes', 'no' ] + description: + - Create a backup file including the timestamp information so you can + get the original file back if you somehow clobbered it incorrectly. +""" + +EXAMPLES = r""" +- name: insert/update "Match User" configuation block in /etc/ssh/sshd_config + blockinfile: + dest: /etc/ssh/sshd_config + block: | + Match User ansible-agent + PasswordAuthentication no + +- name: insert/update eth0 configuration stanza in /etc/network/interfaces + (it might be better to copy files into /etc/network/interfaces.d/) + blockinfile: + dest: /etc/network/interfaces + block: | + iface eth0 inet static + address 192.168.0.1 + netmask 255.255.255.0 + +- name: insert/update HTML surrounded by custom markers after line + blockinfile: + dest: /var/www/html/index.html + marker: "" + insertafter: "" + content: | +

Welcome to {{ansible_hostname}}

+

Last updated on {{ansible_date_time.iso8601}}

+ +- name: remove HTML as well as surrounding markers + blockinfile: + dest: /var/www/html/index.html + marker: "" + content: "" +""" + + +def write_changes(module, contents, dest): + + tmpfd, tmpfile = tempfile.mkstemp() + f = os.fdopen(tmpfd, 'wb') + f.write(contents) + f.close() + + validate = module.params.get('validate', None) + valid = not validate + if validate: + if "%s" not in validate: + module.fail_json(msg="validate must contain %%s: %s" % (validate)) + (rc, out, err) = module.run_command(validate % tmpfile) + valid = rc == 0 + if rc != 0: + module.fail_json(msg='failed to validate: ' + 'rc:%s error:%s' % (rc, err)) + if valid: + module.atomic_move(tmpfile, dest) + + +def check_file_attrs(module, changed, message): + + file_args = module.load_file_common_arguments(module.params) + if module.set_file_attributes_if_different(file_args, False): + + if changed: + message += " and " + changed = True + message += "ownership, perms or SE linux context changed" + + return message, changed + + +def main(): + module = AnsibleModule( + argument_spec=dict( + dest=dict(required=True, aliases=['name', 'destfile']), + state=dict(default='present', choices=['absent', 'present']), + marker=dict(default='# {mark} ANSIBLE MANAGED BLOCK', type='str'), + block=dict(default='', type='str', aliases=['content']), + insertafter=dict(default=None), + insertbefore=dict(default=None), + create=dict(default=False, type='bool'), + backup=dict(default=False, type='bool'), + validate=dict(default=None, type='str'), + ), + mutually_exclusive=[['insertbefore', 'insertafter']], + add_file_common_args=True, + supports_check_mode=True + ) + + params = module.params + dest = os.path.expanduser(params['dest']) + if module.boolean(params.get('follow', None)): + dest = os.path.realpath(dest) + + if os.path.isdir(dest): + module.fail_json(rc=256, + msg='Destination %s is a directory !' 
% dest) + + if not os.path.exists(dest): + if not module.boolean(params['create']): + module.fail_json(rc=257, + msg='Destination %s does not exist !' % dest) + original = None + lines = [] + else: + f = open(dest, 'rb') + original = f.read() + f.close() + lines = original.splitlines() + + insertbefore = params['insertbefore'] + insertafter = params['insertafter'] + block = params['block'] + marker = params['marker'] + present = params['state'] == 'present' + + if insertbefore is None and insertafter is None: + insertafter = 'EOF' + + if insertafter not in (None, 'EOF'): + insertre = re.compile(insertafter) + elif insertbefore not in (None, 'BOF'): + insertre = re.compile(insertbefore) + else: + insertre = None + + marker0 = re.sub(r'{mark}', 'BEGIN', marker) + marker1 = re.sub(r'{mark}', 'END', marker) + if present and block: + # Escape seqeuences like '\n' need to be handled in Ansible 1.x + if ANSIBLE_VERSION.startswith('1.'): + block = re.sub('', block, '') + blocklines = [marker0] + block.splitlines() + [marker1] + else: + blocklines = [] + + n0 = n1 = None + for i, line in enumerate(lines): + if line.startswith(marker0): + n0 = i + if line.startswith(marker1): + n1 = i + + if None in (n0, n1): + n0 = None + if insertre is not None: + for i, line in enumerate(lines): + if insertre.search(line): + n0 = i + if n0 is None: + n0 = len(lines) + elif insertafter is not None: + n0 += 1 + elif insertbefore is not None: + n0 = 0 # insertbefore=BOF + else: + n0 = len(lines) # insertafter=EOF + elif n0 < n1: + lines[n0:n1+1] = [] + else: + lines[n1:n0+1] = [] + n0 = n1 + + lines[n0:n0] = blocklines + + if lines: + result = '\n'.join(lines)+'\n' + else: + result = '' + if original == result: + msg = '' + changed = False + elif original is None: + msg = 'File created' + changed = True + elif not blocklines: + msg = 'Block removed' + changed = True + else: + msg = 'Block inserted' + changed = True + + if changed and not module.check_mode: + if 
module.boolean(params['backup']) and os.path.exists(dest): + module.backup_local(dest) + write_changes(module, result, dest) + + msg, changed = check_file_attrs(module, changed, msg) + module.exit_json(changed=changed, msg=msg) + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.splitter import * +if __name__ == '__main__': + main() From d96ca9c8ec51f83c5c51fcb08f0f748eec3ff11e Mon Sep 17 00:00:00 2001 From: Caduri Date: Wed, 21 Oct 2015 14:34:11 +0300 Subject: [PATCH 13/64] [Bug] exchange name contains chars that needs to be quoted --- messaging/rabbitmq_exchange.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/messaging/rabbitmq_exchange.py b/messaging/rabbitmq_exchange.py index fb74298879b..728186385cb 100644 --- a/messaging/rabbitmq_exchange.py +++ b/messaging/rabbitmq_exchange.py @@ -133,7 +133,7 @@ def main(): module.params['login_host'], module.params['login_port'], urllib.quote(module.params['vhost'],''), - module.params['name'] + urllib.quote(module.params['name'],'') ) # Check if exchange already exists From 2a7b835f7752d21f3f8448dec71a2dff684bf43e Mon Sep 17 00:00:00 2001 From: Caduri Date: Wed, 21 Oct 2015 14:36:23 +0300 Subject: [PATCH 14/64] [Bug] exchange name contains chars that needs to be quoted --- messaging/rabbitmq_binding.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/messaging/rabbitmq_binding.py b/messaging/rabbitmq_binding.py index fc69f490fad..ad7fa151461 100644 --- a/messaging/rabbitmq_binding.py +++ b/messaging/rabbitmq_binding.py @@ -131,9 +131,9 @@ def main(): module.params['login_host'], module.params['login_port'], urllib.quote(module.params['vhost'],''), - module.params['name'], + urllib.quote(module.params['name'],''), dest_type, - module.params['destination'], + urllib.quote(module.params['destination'],''), urllib.quote(module.params['routing_key'],'') ) @@ -173,9 +173,9 @@ def main(): module.params['login_host'], 
module.params['login_port'], urllib.quote(module.params['vhost'],''), - module.params['name'], + urllib.quote(module.params['name'],''), dest_type, - module.params['destination'] + urllib.quote(module.params['destination'],'') ) r = requests.post( From 85cc47c9e1dc21b2eb61a2b09c390fb941da89c6 Mon Sep 17 00:00:00 2001 From: Olaf Kilian Date: Fri, 23 Oct 2015 06:28:28 +0200 Subject: [PATCH 15/64] Default registry to docker hub --- cloud/docker/docker_login.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/cloud/docker/docker_login.py b/cloud/docker/docker_login.py index 1292fe38909..cd670d345db 100644 --- a/cloud/docker/docker_login.py +++ b/cloud/docker/docker_login.py @@ -33,8 +33,9 @@ description: options: registry: description: - - URL of the registry, for example: https://index.docker.io/v1/ - required: true + - URL of the registry, defaults to: https://index.docker.io/v1/ + required: false + default: https://index.docker.io/v1/ username: description: - The username for the registry account @@ -214,7 +215,7 @@ def main(): module = AnsibleModule( argument_spec = dict( - registry = dict(required=True), + registry = dict(required=False, default='https://index.docker.io/v1/'), username = dict(required=True), password = dict(required=True), email = dict(required=False, default='anonymous@localhost.local'), From e86131c62f557aa67e7302a36b1f69746f7c802c Mon Sep 17 00:00:00 2001 From: Olaf Kilian Date: Fri, 23 Oct 2015 06:29:39 +0200 Subject: [PATCH 16/64] Add requirement for python >= 2.6 --- cloud/docker/docker_login.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/docker/docker_login.py b/cloud/docker/docker_login.py index cd670d345db..92ab168288f 100644 --- a/cloud/docker/docker_login.py +++ b/cloud/docker/docker_login.py @@ -70,7 +70,7 @@ options: required: false default: 600 -requirements: [ "docker-py" ] +requirements: [ "python >= 2.6", "docker-py" ] ''' EXAMPLES = ''' From 
c3d15a56cf8f291e7c76587689feb250facddbcb Mon Sep 17 00:00:00 2001 From: Olaf Kilian Date: Fri, 23 Oct 2015 06:33:10 +0200 Subject: [PATCH 17/64] Set default for email parameter to None --- cloud/docker/docker_login.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cloud/docker/docker_login.py b/cloud/docker/docker_login.py index 92ab168288f..a1b469aead1 100644 --- a/cloud/docker/docker_login.py +++ b/cloud/docker/docker_login.py @@ -46,9 +46,9 @@ options: required: true email: description: - - The email address for the registry account + - The email address for the registry account. Note that private registries usually don't need this, but if you want to log into your Docker Hub account (default behaviour) you need to specify this in order to be able to log in. required: false - default: anonymous@localhost.local + default: None reauth: description: - Whether refresh existing authentication on the Docker server (boolean) @@ -218,7 +218,7 @@ def main(): registry = dict(required=False, default='https://index.docker.io/v1/'), username = dict(required=True), password = dict(required=True), - email = dict(required=False, default='anonymous@localhost.local'), + email = dict(required=False, default=None), reauth = dict(required=False, default=False, type='bool'), dockercfg_path = dict(required=False, default='~/.dockercfg'), docker_url = dict(default='unix://var/run/docker.sock'), From ef64423683093ed3624c05639f8e7a11e70359d1 Mon Sep 17 00:00:00 2001 From: Olaf Kilian Date: Fri, 23 Oct 2015 06:34:22 +0200 Subject: [PATCH 18/64] Make module importable for unit tests --- cloud/docker/docker_login.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cloud/docker/docker_login.py b/cloud/docker/docker_login.py index a1b469aead1..cf8147c692b 100644 --- a/cloud/docker/docker_login.py +++ b/cloud/docker/docker_login.py @@ -244,4 +244,5 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() +if __name__ 
== '__main__': + main() From 9d39885d18b77d63b15753f8a7399812388b869c Mon Sep 17 00:00:00 2001 From: Olaf Kilian Date: Wed, 28 Oct 2015 10:04:55 +0100 Subject: [PATCH 19/64] Adapt to new dockercfg file location and structure --- cloud/docker/docker_login.py | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/cloud/docker/docker_login.py b/cloud/docker/docker_login.py index cf8147c692b..d84abe6fe98 100644 --- a/cloud/docker/docker_login.py +++ b/cloud/docker/docker_login.py @@ -58,7 +58,7 @@ options: description: - Use a custom path for the .dockercfg file required: false - default: ~/.dockercfg + default: ~/.docker/config.json docker_url: descriptions: - Refers to the protocol+hostname+port where the Docker server is hosted @@ -176,6 +176,9 @@ class DockerLoginManager: # Create dockercfg file if it does not exist. if not os.path.exists(self.dockercfg_path): + dockercfg_path_dir = os.path.dirname(self.dockercfg_path) + if not os.path.exists(dockercfg_path_dir): + os.makedirs(dockercfg_path_dir) open(self.dockercfg_path, "w") self.log.append("Created new Docker config file at %s" % self.dockercfg_path) else: @@ -186,9 +189,11 @@ class DockerLoginManager: docker_config = json.load(open(self.dockercfg_path, "r")) except ValueError: docker_config = dict() - if not docker_config.has_key(self.registry): - docker_config[self.registry] = dict() - docker_config[self.registry] = dict( + if not docker_config.has_key("auths"): + docker_config["auths"] = dict() + if not docker_config["auths"].has_key(self.registry): + docker_config["auths"][self.registry] = dict() + docker_config["auths"][self.registry] = dict( auth = base64.b64encode(self.username + b':' + self.password), email = self.email ) @@ -220,7 +225,7 @@ def main(): password = dict(required=True), email = dict(required=False, default=None), reauth = dict(required=False, default=False, type='bool'), - dockercfg_path = dict(required=False, default='~/.dockercfg'), + dockercfg_path = 
dict(required=False, default='~/.docker/config.json'), docker_url = dict(default='unix://var/run/docker.sock'), timeout = dict(default=10, type='int') ), From 988be3458d9b699f7942e64211a364800cc446b1 Mon Sep 17 00:00:00 2001 From: Olaf Kilian Date: Wed, 28 Oct 2015 10:13:35 +0100 Subject: [PATCH 20/64] Rework change detection --- cloud/docker/docker_login.py | 30 ++++++++++++++++-------------- 1 file changed, 16 insertions(+), 14 deletions(-) diff --git a/cloud/docker/docker_login.py b/cloud/docker/docker_login.py index d84abe6fe98..15216595fad 100644 --- a/cloud/docker/docker_login.py +++ b/cloud/docker/docker_login.py @@ -162,13 +162,9 @@ class DockerLoginManager: # Get status from registry response. if self.response.has_key("Status"): self.log.append(self.response["Status"]) - if self.response["Status"] == "Login Succeeded": - self.changed = True - else: - self.log.append("Already Authentificated") - # Update the dockercfg if changed but not failed. - if self.has_changed() and not self.module.check_mode: + # Update the dockercfg if not in check mode. + if not self.module.check_mode: self.update_dockercfg() # This is what the underlaying docker-py unfortunately doesn't do (yet). @@ -182,9 +178,9 @@ class DockerLoginManager: open(self.dockercfg_path, "w") self.log.append("Created new Docker config file at %s" % self.dockercfg_path) else: - self.log.append("Updated existing Docker config file at %s" % self.dockercfg_path) + self.log.append("Found existing Docker config file at %s" % self.dockercfg_path) - # Get existing dockercfg into a dict. + # Build a dict for the existing dockercfg. try: docker_config = json.load(open(self.dockercfg_path, "r")) except ValueError: @@ -193,16 +189,22 @@ class DockerLoginManager: docker_config["auths"] = dict() if not docker_config["auths"].has_key(self.registry): docker_config["auths"][self.registry] = dict() - docker_config["auths"][self.registry] = dict( + + # Calculate docker credentials based on current parameters. 
+ new_docker_config = dict( auth = base64.b64encode(self.username + b':' + self.password), email = self.email ) - # Write updated dockercfg to dockercfg file. - try: - json.dump(docker_config, open(self.dockercfg_path, "w"), indent=4, sort_keys=True) - except Exception as e: - self.module.fail_json(msg="failed to write auth details to file", error=repr(e)) + # Update config if persisted credentials differ from current credentials. + if new_docker_config != docker_config["auths"][self.registry]: + docker_config["auths"][self.registry] = new_docker_config + try: + json.dump(docker_config, open(self.dockercfg_path, "w"), indent=4, sort_keys=True) + except Exception as e: + self.module.fail_json(msg="failed to write auth details to file", error=repr(e)) + self.log.append("Updated Docker config with new credentials.") + self.changed = True # Compatible to docker-py auth.decode_docker_auth() def encode_docker_auth(self, auth): From 98b21ee7f3d6edf082eeb329f53f1b7dafd23312 Mon Sep 17 00:00:00 2001 From: Olaf Kilian Date: Wed, 28 Oct 2015 10:14:54 +0100 Subject: [PATCH 21/64] Improve registry key parity between clients * Don't extract hostname part from docker_url since this leads to docker CLI client not recognizing Docker Hub credentials set by docker_login module anymore (looks for the full URL as a key). --- cloud/docker/docker_login.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/cloud/docker/docker_login.py b/cloud/docker/docker_login.py index 15216595fad..c00dc3f900d 100644 --- a/cloud/docker/docker_login.py +++ b/cloud/docker/docker_login.py @@ -84,7 +84,7 @@ Login to a Docker registry without performing any other action. 
Make sure that t - name: login to private Docker remote registry and force reauthentification docker_login: - registry: https://your.private.registry.io/v1/ + registry: your.private.registry.io username: yourself password: secrets3 reauth: yes @@ -140,10 +140,6 @@ class DockerLoginManager: if self.reauth: self.log.append("Enforcing reauthentification") - # Extract hostname part from self.registry if url was specified. - registry_url = urlparse(self.registry) - self.registry = registry_url.netloc or registry_url.path - # Connect to registry and login if not already logged in or reauth is enforced. try: self.response = self.client.login( From a21d935e66da40d6184b0dc12d3e894917279476 Mon Sep 17 00:00:00 2001 From: wimnat Date: Tue, 3 Nov 2015 01:03:31 +0000 Subject: [PATCH 22/64] Prevent ec2_remote_facts from failing when listing a terminated instance --- cloud/amazon/ec2_remote_facts.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_remote_facts.py b/cloud/amazon/ec2_remote_facts.py index cb92ccba74d..cf54fa0274d 100644 --- a/cloud/amazon/ec2_remote_facts.py +++ b/cloud/amazon/ec2_remote_facts.py @@ -76,6 +76,12 @@ def get_instance_info(instance): interfaces = [] for interface in instance.interfaces: interfaces.append({ 'id': interface.id, 'mac_address': interface.mac_address }.copy()) + + # If an instance is terminated, sourceDestCheck is no longer returned + try: + source_dest_check = instance.sourceDestCheck + except AttributeError: + source_dest_check = None instance_info = { 'id': instance.id, 'kernel': instance.kernel, @@ -90,7 +96,7 @@ def get_instance_info(instance): 'ramdisk': instance.ramdisk, 'tags': instance.tags, 'key_name': instance.key_name, - 'source_destination_check': instance.sourceDestCheck, + 'source_destination_check': source_dest_check, 'image_id': instance.image_id, 'groups': groups, 'interfaces': interfaces, From 975d7952b956ff004b5e97ecf2a52a86a034f567 Mon Sep 17 00:00:00 2001 From: Kenny Gryp Date: 
Tue, 3 Nov 2015 16:44:00 +0100 Subject: [PATCH 23/64] including error code and error number when database connection creation fails --- database/mysql/mysql_replication.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/database/mysql/mysql_replication.py b/database/mysql/mysql_replication.py index 348f49df6c2..c8e342a1d23 100644 --- a/database/mysql/mysql_replication.py +++ b/database/mysql/mysql_replication.py @@ -337,7 +337,8 @@ def main(): else: db_connection = MySQLdb.connect(host=module.params["login_host"], port=module.params["login_port"], user=login_user, passwd=login_password) except Exception, e: - module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or ~/.my.cnf has the credentials") + errno, errstr = e.args + module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or ~/.my.cnf has the credentials (%s: %s)" % (errno, errstr) ) try: cursor = db_connection.cursor(cursorclass=MySQLdb.cursors.DictCursor) except Exception, e: From ea2fd78e6a1b1bf74d589241fc553a841adc660f Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 3 Nov 2015 12:03:00 -0500 Subject: [PATCH 24/64] fixed default from None to [] for ctstate --- system/iptables.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/iptables.py b/system/iptables.py index 29010b730e5..e78295cc291 100644 --- a/system/iptables.py +++ b/system/iptables.py @@ -313,7 +313,7 @@ def main(): destination_port=dict(required=False, default=None, type='str'), to_ports=dict(required=False, default=None, type='str'), comment=dict(required=False, default=None, type='str'), - ctstate=dict(required=False, default=None, type='list'), + ctstate=dict(required=False, default=[], type='list'), ), ) args = dict( From 6a87eed58690ec8ccb0e6a37da3bdbb45f38e7ff Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 3 Nov 2015 12:12:39 -0500 Subject: [PATCH 25/64] made ctstate default to [] and 
evaluation conditional on the list being popoulated --- system/iptables.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/system/iptables.py b/system/iptables.py index e78295cc291..8c2a67eb636 100644 --- a/system/iptables.py +++ b/system/iptables.py @@ -262,8 +262,9 @@ def construct_rule(params): append_param(rule, params['to_ports'], '--to-ports', False) append_comm(rule, params['comment']) append_param(rule, params['comment'], '--comment', False) - append_conntrack(rule, params['ctstate']) - append_param(rule, ','.join(params['ctstate']), '--ctstate', False) + if params['ctstate']: + append_conntrack(rule, params['ctstate']) + append_param(rule, ','.join(params['ctstate']), '--ctstate', False) return rule From dbee2266e198f6d83837421b38612683b814166a Mon Sep 17 00:00:00 2001 From: Romain Brucker Date: Tue, 3 Nov 2015 11:41:30 -0600 Subject: [PATCH 26/64] Adding limit feature to iptables module --- system/iptables.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/system/iptables.py b/system/iptables.py index 8c2a67eb636..83eb1b714f8 100644 --- a/system/iptables.py +++ b/system/iptables.py @@ -208,6 +208,10 @@ options: - "ctstate is a list of the connection states to match in the conntrack module. Possible states are: 'INVALID', 'NEW', 'ESTABLISHED', 'RELATED', 'UNTRACKED', 'SNAT', 'DNAT'" required: false + limit: + description: + - "Specifies the maximum average number of matches to allow per second. The number can specify units explicitly, using `/second', `/minute', `/hour' or `/day', or parts of them (so `5/second' is the same as `5/s')." 
+ required: false ''' EXAMPLES = ''' @@ -244,6 +248,11 @@ def append_conntrack(rule, param): rule.extend(['-m']) rule.extend(['conntrack']) +def append_limit(rule, param): + if param: + rule.extend(['-m']) + rule.extend(['limit']) + def construct_rule(params): rule = [] @@ -265,6 +274,8 @@ def construct_rule(params): if params['ctstate']: append_conntrack(rule, params['ctstate']) append_param(rule, ','.join(params['ctstate']), '--ctstate', False) + append_limit(rule, params['limit']) + append_param(rule, params['limit'], '--limit', False) return rule @@ -315,6 +326,7 @@ def main(): to_ports=dict(required=False, default=None, type='str'), comment=dict(required=False, default=None, type='str'), ctstate=dict(required=False, default=[], type='list'), + limit=dict(required=False, default=[], type='list'), ), ) args = dict( From 2b04f0c5cf5180269d12060b86d9cbded37b58e8 Mon Sep 17 00:00:00 2001 From: Romain Brucker Date: Tue, 3 Nov 2015 11:47:28 -0600 Subject: [PATCH 27/64] Fixing limit type from list to string --- system/iptables.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/iptables.py b/system/iptables.py index 83eb1b714f8..3e42a711db4 100644 --- a/system/iptables.py +++ b/system/iptables.py @@ -326,7 +326,7 @@ def main(): to_ports=dict(required=False, default=None, type='str'), comment=dict(required=False, default=None, type='str'), ctstate=dict(required=False, default=[], type='list'), - limit=dict(required=False, default=[], type='list'), + limit=dict(required=False, default=None, type='str'), ), ) args = dict( From f281eb2b30ca5a88f28c8a2c3ee5983b4f42bf54 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Tue, 3 Nov 2015 15:48:58 -0500 Subject: [PATCH 28/64] Add new SMEs for Zabbix --- REVIEWERS.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/REVIEWERS.md b/REVIEWERS.md index 06263169ec7..fe7392d7f04 100644 --- a/REVIEWERS.md +++ b/REVIEWERS.md @@ -49,6 +49,8 @@ Docker: @cove @joshuaconner @softzilla @smashwilson Red Hat 
Network: @barnabycourt @vritant @flossware +Zabbix: @cove @harrisongu @abulimov + PR Process ======= From 437a62836f71b725900bee28d845c1a0aca15129 Mon Sep 17 00:00:00 2001 From: Joel Thompson Date: Tue, 3 Nov 2015 23:01:50 -0500 Subject: [PATCH 29/64] Add sns_topic module to manage AWS SNS topics This adds an sns_topic module which allows you to create and delete AWS SNS topics as well as subscriptions to those topics. --- cloud/amazon/sns_topic.py | 261 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 261 insertions(+) create mode 100755 cloud/amazon/sns_topic.py diff --git a/cloud/amazon/sns_topic.py b/cloud/amazon/sns_topic.py new file mode 100755 index 00000000000..a9de7b88f10 --- /dev/null +++ b/cloud/amazon/sns_topic.py @@ -0,0 +1,261 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +DOCUMENTATION = """ +module: sns_topic +short_description: Manages AWS SNS topics and subscriptions +description: + - The M(sns_topic) module allows you to create, delete, and manage subscriptions for AWS SNS topics. +version_added: 2.0 +author: "Joel Thompson (@joelthompson)" +options: + name: + description: + - The name or ARN of the SNS topic to converge + required: true + state: + description: + - Whether to create or destroy an SNS topic + required: false + default: present + choices: ["absent", "present"] + display_name: + description: + - Display name of the topic + required: False + policy: + description: + - Policy to apply to the SNS topic + required: False + delivery_policy: + description: + - Delivery policy to apply to the SNS topic + required: False + subscriptions: + description: + - List of subscriptions to apply to the topic. Note that AWS requires + subscriptions to be confirmed, so you will need to confirm any new + subscriptions. + purge_subscriptions: + description: + - Whether to purge any subscriptions not listed here. 
NOTE: AWS does not + allow you to purge any PendingConfirmation subscriptions, so if any + exist and would be purged, they are silently skipped. This means that + somebody could come back later and confirm the subscription. Sorry. + Blame Amazon. + default: True +extends_documentation_fragment: aws +requirements: [ "boto" ] +""" + +EXAMPLES = """ + +- name: Create alarm SNS topic + sns_topic: + name: "alarms" + state: present + display_name: "alarm SNS topic" + delivery_policy: + http: + defaultHealthyRetryPolicy: + minDelayTarget: 2 + maxDelayTarget: 4 + numRetries: 3 + numMaxDelayRetries: 5 + backoffFunction: "" + disableSubscriptionOverrides: True + defaultThrottlePolicy: + maxReceivesPerSecond: 10 + subscriptions: + - endpoint: "my_email_address@example.com" + protocol: "email" + - endpoint: "my_mobile_number" + protocol: "sms" + +""" + +import sys +import time +import json +import re + +try: + import boto + import boto.sns +except ImportError: + print "failed=True msg='boto required for this module'" + sys.exit(1) + + +def canonicalize_endpoint(protocol, endpoint): + if protocol == 'sms': + import re + return re.sub('[^0-9]*', '', endpoint) + return endpoint + + + +def get_all_topics(connection): + next_token = None + topics = [] + while True: + response = connection.get_all_topics(next_token) + topics.extend(response['ListTopicsResponse']['ListTopicsResult']['Topics']) + next_token = \ + response['ListTopicsResponse']['ListTopicsResult']['NextToken'] + if not next_token: + break + return [t['TopicArn'] for t in topics] + + +def arn_topic_lookup(connection, short_topic): + # topic names cannot have colons, so this captures the full topic name + all_topics = get_all_topics(connection) + lookup_topic = ':%s' % short_topic + for topic in all_topics: + if topic.endswith(lookup_topic): + return topic + return None + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update( + dict( + name=dict(type='str', required=True), + state=dict(type='str', 
default='present', choices=['present', + 'absent']), + display_name=dict(type='str', required=False), + policy=dict(type='dict', required=False), + delivery_policy=dict(type='dict', required=False), + subscriptions=dict(type='list', required=False), + purge_subscriptions=dict(type='bool', default=True), + ) + ) + + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) + + name = module.params.get('name') + state = module.params.get('state') + display_name = module.params.get('display_name') + policy = module.params.get('policy') + delivery_policy = module.params.get('delivery_policy') + subscriptions = module.params.get('subscriptions') + purge_subscriptions = module.params.get('purge_subscriptions') + check_mode = module.check_mode + changed = False + + topic_created = False + attributes_set = [] + subscriptions_added = [] + subscriptions_deleted = [] + + region, ec2_url, aws_connect_params = get_aws_connection_info(module) + if not region: + module.fail_json(msg="region must be specified") + try: + connection = connect_to_aws(boto.sns, region, **aws_connect_params) + except boto.exception.NoAuthHandlerFound, e: + module.fail_json(msg=str(e)) + + # topics cannot contain ':', so thats the decider + if ':' in name: + all_topics = get_all_topics(connection) + if name in all_topics: + arn_topic = name + elif state == 'absent': + module.exit_json(changed=False) + else: + module.fail_json(msg="specified an ARN for a topic but it doesn't" + " exist") + else: + arn_topic = arn_topic_lookup(connection, name) + if not arn_topic: + if state == 'absent': + module.exit_json(changed=False) + elif check_mode: + module.exit_json(changed=True, topic_created=True, + subscriptions_added=subscriptions, + subscriptions_deleted=[]) + + changed=True + topic_created = True + connection.create_topic(name) + arn_topic = arn_topic_lookup(connection, name) + while not arn_topic: + time.sleep(3) + arn_topic = arn_topic_lookup(connection, name) + + if arn_topic and 
state == "absent": + if not check_mode: + connection.delete_topic(arn_topic) + module.exit_json(changed=True) + + topic_attributes = connection.get_topic_attributes(arn_topic) \ + ['GetTopicAttributesResponse'] ['GetTopicAttributesResult'] \ + ['Attributes'] + if display_name and display_name != topic_attributes['DisplayName']: + changed = True + attributes_set.append('display_name') + if not check_mode: + connection.set_topic_attributes(arn_topic, 'DisplayName', + display_name) + + if policy and policy != json.loads(topic_attributes['policy']): + changed = True + attributes_set.append('policy') + if not check_mode: + connection.set_topic_attributes(arn_topic, 'Policy', + json.dumps(policy)) + + if delivery_policy and ('DeliveryPolicy' not in topic_attributes or \ + delivery_policy != json.loads(topic_attributes['DeliveryPolicy'])): + changed = True + attributes_set.append('delivery_policy') + if not check_mode: + connection.set_topic_attributes(arn_topic, 'DeliveryPolicy', + json.dumps(delivery_policy)) + + + next_token = None + aws_subscriptions = [] + while True: + response = connection.get_all_subscriptions_by_topic(arn_topic, + next_token) + aws_subscriptions.extend(response['ListSubscriptionsByTopicResponse'] \ + ['ListSubscriptionsByTopicResult']['Subscriptions']) + next_token = response['ListSubscriptionsByTopicResponse'] \ + ['ListSubscriptionsByTopicResult']['NextToken'] + if not next_token: + break + + desired_subscriptions = [(sub['protocol'], + canonicalize_endpoint(sub['protocol'], sub['endpoint'])) for sub in + subscriptions] + aws_subscriptions_list = [] + + for sub in aws_subscriptions: + sub_key = (sub['Protocol'], sub['Endpoint']) + aws_subscriptions_list.append(sub_key) + if purge_subscriptions and sub_key not in desired_subscriptions and \ + sub['SubscriptionArn'] != 'PendingConfirmation': + changed = True + subscriptions_deleted.append(sub_key) + if not check_mode: + connection.unsubscribe(sub['SubscriptionArn']) + + for (protocol, endpoint) 
in desired_subscriptions: + if (protocol, endpoint) not in aws_subscriptions_list: + changed = True + subscriptions_added.append(sub) + if not check_mode: + connection.subscribe(arn_topic, protocol, endpoint) + + module.exit_json(changed=changed, topic_created=topic_created, + attributes_set=attributes_set, + subscriptions_added=subscriptions_added, + subscriptions_deleted=subscriptions_deleted, sns_arn=arn_topic) + +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import * + +main() From 1ee6962c938272985e870299f8a04d58fee2e76f Mon Sep 17 00:00:00 2001 From: Etherdaemon Date: Wed, 4 Nov 2015 16:29:47 +1000 Subject: [PATCH 30/64] Add new module to allow for getting and listing of Route53 relevant details --- cloud/amazon/route53_facts.py | 434 ++++++++++++++++++++++++++++++++++ 1 file changed, 434 insertions(+) create mode 100644 cloud/amazon/route53_facts.py diff --git a/cloud/amazon/route53_facts.py b/cloud/amazon/route53_facts.py new file mode 100644 index 00000000000..16034acb51a --- /dev/null +++ b/cloud/amazon/route53_facts.py @@ -0,0 +1,434 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +DOCUMENTATION = ''' +module: route53_facts +short_description: Retrieves route53 details using AWS methods +description: + - Gets various details related to Route53 zone, record set or health check details +version_added: "2.0" +options: + query: + description: + - specifies the query action to take + required: True + choices: [ + 'change', + 'checker_ip_range', + 'health_check', + 'hosted_zone', + 'record_sets', + 'reusable_delegation_set', + ] + change_id: + description: + - The ID of the change batch request. + The value that you specify here is the value that + ChangeResourceRecordSets returned in the Id element + when you submitted the request. + required: false + hosted_zone_id: + description: + - The Hosted Zone ID of the DNS zone + required: false + max_items: + description: + - Maximum number of items to return for various get/list requests + required: false + next_marker: + description: + - Some requests such as list_command: hosted_zones will return a maximum + number of entries - EG 100. 
If the number of entries exceeds this maximum + another request can be sent using the NextMarker entry from the first response + to get the next page of results + required: false + delegation_set_id: + description: + - The DNS Zone delegation set ID + required: false + start_record_name: + description: + - The first name in the lexicographic ordering of domain names that you want + the list_command: record_sets to start listing from + required: false + type: + description: + - The type of DNS record + required: false + choices: [ 'A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'NS' ] + dns_name: + description: + - The first name in the lexicographic ordering of domain names that you want + the list_command to start listing from + required: false + resource_id: + description: + - The ID/s of the specified resource/s + required: false + aliases: ['resource_ids'] + health_check_id: + description: + - The ID of the health check + required: false + hosted_zone_method: + description: + - This is used in conjunction with query: hosted_zone. + It allows for listing details, counts or tags of various + hosted zone details. + required: false + choices: [ + 'details', + 'list', + 'list_by_name', + 'count', + 'tags', + ] + default: 'list' + health_check_method: + description: + - This is used in conjunction with query: health_check. + It allows for listing details, counts or tags of various + health check details. 
+ required: false + choices: [ + 'list', + 'details', + 'status', + 'failure_reason', + 'count', + 'tags', + ] + default: 'list' +author: Karen Cheng(@Etherdaemon) +extends_documentation_fragment: aws +''' + +EXAMPLES = ''' +# Simple example of listing all hosted zones +- name: List all hosted zones + route53_facts: + query: hosted_zone + register: hosted_zones + +# Getting a count of hosted zones +- name: Return a count of all hosted zones + route53_facts: + query: hosted_zone + hosted_zone_method: count + register: hosted_zone_count + +- name: List the first 20 resource record sets in a given hosted zone + route53_facts: + profile: account_name + query: record_sets + hosted_zone_id: 'ZZZ1111112222' + max_items: 20 + register: record_sets + +- name: List first 20 health checks + route53_facts: + query: health_check + health_check_method: list + max_items: 20 + register: health_checks + +- name: Get health check last failure_reason + route53_facts: + query: health_check + health_check_method: failure_reason + health_check_id: '00000000-1111-2222-3333-12345678abcd' + register: health_check_failure_reason + +- name: Retrieve reusable delegation set details + route53_facts: + query: reusable_delegation_set + delegation_set_id: 'delegation id' + register: delegation_sets + +''' +try: + import json + import boto + import botocore + HAS_BOTO = True +except ImportError: + HAS_BOTO = False + +try: + import boto3 + HAS_BOTO3 = True +except ImportError: + HAS_BOTO3 = False + + +def get_hosted_zone(client, module): + params = dict() + + if module.params.get('hosted_zone_id'): + params['HostedZoneId'] = module.params.get('hosted_zone_id') + else: + module.fail_json(msg="Hosted Zone Id is required") + + results = client.get_hosted_zone(**params) + return results + + +def reusable_delegation_set_details(client, module): + params = dict() + if not module.params.get('delegation_set_id'): + if module.params.get('max_items'): + params['MaxItems'] = module.params.get('max_items') + + 
if module.params.get('next_marker'): + params['Marker'] = module.params.get('next_marker') + + results = client.list_reusable_delegation_sets(**params) + else: + params['DelegationSetId'] = module.params.get('delegation_set_id') + results = client.get_reusable_delegation_set(**params) + + return results + + +def list_hosted_zones(client, module): + params = dict() + + if module.params.get('max_items'): + params['MaxItems'] = module.params.get('max_items') + + if module.params.get('next_marker'): + params['Marker'] = module.params.get('next_marker') + + if module.params.get('delegation_set_id'): + params['DelegationSetId'] = module.params.get('delegation_set_id') + + results = client.list_hosted_zones(**params) + return results + + +def list_hosted_zones_by_name(client, module): + params = dict() + + if module.params.get('hosted_zone_id'): + params['HostedZoneId'] = module.params.get('hosted_zone_id') + + if module.params.get('dns_name'): + params['DNSName'] = module.params.get('dns_name') + + if module.params.get('max_items'): + params['MaxItems'] = module.params.get('max_items') + + results = client.list_hosted_zones_by_name(**params) + return results + + +def change_details(client, module): + params = dict() + + if module.params.get('change_id'): + params['Id'] = module.params.get('change_id') + else: + module.fail_json(msg="change_id is required") + + results = client.get_change(**params) + return results + + +def checker_ip_range_details(client, module): + results = client.get_checker_ip_ranges() + return results + + +def get_count(client, module): + if module.params.get('query') == 'health_check': + results = client.get_health_check_count() + else: + results = client.get_hosted_zone_count() + + return results + + +def get_health_check(client, module): + params = dict() + + if not module.params.get('health_check_id'): + module.fail_json(msg="health_check_id is required") + else: + params['HealthCheckId'] = module.params.get('health_check_id') + + if 
module.params.get('health_check_method') == 'details': + results = client.get_health_check(**params) + elif module.params.get('health_check_method') == 'failure_reason': + results = client.get_health_check_last_failure_reason(**params) + elif module.params.get('health_check_method') == 'status': + results = client.get_health_check_status(**params) + + return results + + +def get_resource_tags(client, module): + params = dict() + + if module.params.get('resource_id'): + params['ResourceIds'] = module.params.get('resource_id') + else: + module.fail_json(msg="resource_id or resource_ids is required") + + if module.params.get('query') == 'health_check': + params['ResourceType'] = 'healthcheck' + else: + params['ResourceType'] = 'hostedzone' + + results = client.list_tags_for_resources(**params) + return results + + +def list_health_checks(client, module): + params = dict() + + if module.params.get('max_items'): + params['MaxItems'] = module.params.get('max_items') + + if module.params.get('next_marker'): + params['Marker'] = module.params.get('next_marker') + + results = client.list_health_checks(**params) + return results + + +def record_sets_details(client, module): + params = dict() + + if module.params.get('hosted_zone_id'): + params['HostedZoneId'] = module.params.get('hosted_zone_id') + else: + module.fail_json(msg="Hosted Zone Id is required") + + if module.params.get('start_record_name'): + params['StartRecordName'] = module.params.get('start_record_name') + + if module.params.get('type') and not module.params.get('start_record_name'): + module.fail_json(msg="start_record_name must be specified if type is set") + elif module.params.get('type'): + params['StartRecordType'] = module.params.get('type') + + results = client.list_resource_record_sets(**params) + return results + + +def health_check_details(client, module): + health_check_invocations = { + 'list': list_health_checks, + 'details': get_health_check, + 'status': get_health_check, + 'failure_reason': 
get_health_check, + 'count': get_count, + 'tags': get_resource_tags, + } + + results = health_check_invocations[module.params.get('health_check_method')](client, module) + return results + + +def hosted_zone_details(client, module): + hosted_zone_invocations = { + 'details': get_hosted_zone, + 'list': list_hosted_zones, + 'list_by_name': list_hosted_zones_by_name, + 'count': get_count, + 'tags': get_resource_tags, + } + + results = hosted_zone_invocations[module.params.get('hosted_zone_method')](client, module) + return results + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + query=dict(choices=[ + 'change', + 'checker_ip_range', + 'health_check', + 'hosted_zone', + 'record_sets', + 'reusable_delegation_set', + ], required=True), + change_id=dict(), + hosted_zone_id=dict(), + max_items=dict(type='str'), + next_marker=dict(), + delegation_set_id=dict(), + start_record_name=dict(), + type=dict(choices=[ + 'A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'NS' + ]), + dns_name=dict(), + resource_id=dict(type='list', aliases=['resource_ids']), + health_check_id=dict(), + hosted_zone_method=dict(choices=[ + 'details', + 'list', + 'list_by_name', + 'count', + 'tags' + ], default='list'), + health_check_method=dict(choices=[ + 'list', + 'details', + 'status', + 'failure_reason', + 'count', + 'tags', + ], default='list'), + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['hosted_zone_method', 'health_check_method'], + ], + ) + + # Validate Requirements + if not (HAS_BOTO or HAS_BOTO3): + module.fail_json(msg='json and boto/boto3 is required.') + + try: + region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) + route53 = boto3_conn(module, conn_type='client', resource='route53', region=region, endpoint=ec2_url, **aws_connect_kwargs) + except boto.exception.NoAuthHandlerFound, e: + module.fail_json(msg="Can't authorize connection - "+str(e)) + + invocations = { + 
'change': change_details, + 'checker_ip_range': checker_ip_range_details, + 'health_check': health_check_details, + 'hosted_zone': hosted_zone_details, + 'record_sets': record_sets_details, + 'reusable_delegation_set': reusable_delegation_set_details, + } + results = invocations[module.params.get('query')](route53, module) + + module.exit_json(**results) + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import * + +if __name__ == '__main__': + main() From 63003372b4f87f6f775b23cc9af491630647b48b Mon Sep 17 00:00:00 2001 From: root Date: Thu, 5 Nov 2015 21:16:41 -0600 Subject: [PATCH 31/64] Fixed call to module.log --- network/openvswitch_port.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/openvswitch_port.py b/network/openvswitch_port.py index e98453fc95f..5fbbe8480dd 100644 --- a/network/openvswitch_port.py +++ b/network/openvswitch_port.py @@ -140,7 +140,7 @@ class OVSPort(object): def set(self, set_opt): """ Set attributes on a port. 
""" - self.module("set called %s" % set_opt) + self.module.log("set called %s" % set_opt) if (not set_opt): return False From 5753a05625ec5a8da08039a5277b87ee6501b696 Mon Sep 17 00:00:00 2001 From: Hans-Joachim Kliemeck Date: Thu, 5 Nov 2015 17:50:47 +0100 Subject: [PATCH 32/64] fixxed problem with match @ --- windows/win_nssm.ps1 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/windows/win_nssm.ps1 b/windows/win_nssm.ps1 index bf4e798fca5..fa61afdaafc 100644 --- a/windows/win_nssm.ps1 +++ b/windows/win_nssm.ps1 @@ -352,7 +352,7 @@ Function Nssm-Update-Credentials } else { $fullUser = $user - If (-not($user -contains "@") -and ($user.Split("\").count -eq 1)) { + If (-Not($user.contains("@")) -And ($user.Split("\").count -eq 1)) { $fullUser = ".\" + $user } From 2ac53bf559232f1ba83548c5f7d2ca208afb25ee Mon Sep 17 00:00:00 2001 From: Kerim Satirli Date: Fri, 6 Nov 2015 11:02:51 +0100 Subject: [PATCH 33/64] fixes a typo in Datadog Monitor docs --- monitoring/datadog_monitor.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monitoring/datadog_monitor.py b/monitoring/datadog_monitor.py index 9853d748c2c..9318326620e 100644 --- a/monitoring/datadog_monitor.py +++ b/monitoring/datadog_monitor.py @@ -54,7 +54,7 @@ options: default: null choices: ['metric alert', 'service check'] query: - description: ["he monitor query to notify on with syntax varying depending on what type of monitor you are creating."] + description: ["The monitor query to notify on with syntax varying depending on what type of monitor you are creating."] required: false default: null name: From b51d096c317623251b251c013936ee0a90636b23 Mon Sep 17 00:00:00 2001 From: Jimmy Tang Date: Fri, 6 Nov 2015 13:14:55 +0000 Subject: [PATCH 34/64] Fix documentation, the correct parameter is "name" --- clustering/znode.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clustering/znode.py b/clustering/znode.py index 8effcd9189e..51ab51d0ea4 100644 --- 
a/clustering/znode.py +++ b/clustering/znode.py @@ -26,7 +26,7 @@ options: description: - A list of ZooKeeper servers (format '[server]:[port]'). required: true - path: + name: description: - The path of the znode. required: true From e1c1ea9013dcde6e66b8e21e35bbce9638fa4e7e Mon Sep 17 00:00:00 2001 From: Kenny Gryp Date: Mon, 9 Nov 2015 10:05:53 +0100 Subject: [PATCH 35/64] in order for replication setup to work, some errors should be ignored --- database/mysql/mysql_replication.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/database/mysql/mysql_replication.py b/database/mysql/mysql_replication.py index c8e342a1d23..f01ffa76dc3 100644 --- a/database/mysql/mysql_replication.py +++ b/database/mysql/mysql_replication.py @@ -311,7 +311,7 @@ def main(): if not mysqldb_found: module.fail_json(msg="the python mysqldb module is required") else: - warnings.filterwarnings('error', category=MySQLdb.Warning) + warnings.filterwarnings('ignore', category=MySQLdb.Warning) # Either the caller passes both a username and password with which to connect to # mysql, or they pass neither and allow this module to read the credentials from From 53d42cd8d870db500ff623c158365762c5b31a0b Mon Sep 17 00:00:00 2001 From: Kenny Gryp Date: Mon, 9 Nov 2015 10:07:15 +0100 Subject: [PATCH 36/64] revert to unbreak pull request --- database/mysql/mysql_replication.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/database/mysql/mysql_replication.py b/database/mysql/mysql_replication.py index f01ffa76dc3..c8e342a1d23 100644 --- a/database/mysql/mysql_replication.py +++ b/database/mysql/mysql_replication.py @@ -311,7 +311,7 @@ def main(): if not mysqldb_found: module.fail_json(msg="the python mysqldb module is required") else: - warnings.filterwarnings('ignore', category=MySQLdb.Warning) + warnings.filterwarnings('error', category=MySQLdb.Warning) # Either the caller passes both a username and password with which to connect to # mysql, or they pass neither and 
allow this module to read the credentials from From 5e103d604a707c039a0930c330e7569f867b05a7 Mon Sep 17 00:00:00 2001 From: Ritesh Khadgaray Date: Mon, 9 Nov 2015 20:21:28 +0530 Subject: [PATCH 37/64] allows user to not update zabbix host config if host is present. --- monitoring/zabbix_host.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/monitoring/zabbix_host.py b/monitoring/zabbix_host.py index 6fac82c7177..3cb27c5fbb9 100644 --- a/monitoring/zabbix_host.py +++ b/monitoring/zabbix_host.py @@ -91,6 +91,13 @@ options: - 'https://www.zabbix.com/documentation/2.0/manual/appendix/api/hostinterface/definitions#host_interface' required: false default: [] + force: + description: + - Overwrite the host configuration, even if already present + required: false + default: "yes" + choices: [ "yes", "no" ] + version_added: "2.0" ''' EXAMPLES = ''' @@ -370,6 +377,7 @@ def main(): state=dict(default="present", choices=['present', 'absent']), timeout=dict(type='int', default=10), interfaces=dict(required=False), + force=dict(default='yes', choices='bool'), proxy=dict(required=False) ), supports_check_mode=True @@ -388,6 +396,7 @@ def main(): state = module.params['state'] timeout = module.params['timeout'] interfaces = module.params['interfaces'] + force = module.params['force'] proxy = module.params['proxy'] # convert enabled to 0; disabled to 1 @@ -439,6 +448,9 @@ def main(): if not group_ids: module.fail_json(msg="Specify at least one group for updating host '%s'." 
% host_name) + if not force: + module.fail_json(changed=False, result="Host present, Can't update configuration without force") + # get exist host's interfaces exist_interfaces = host._zapi.hostinterface.get({'output': 'extend', 'hostids': host_id}) exist_interfaces_copy = copy.deepcopy(exist_interfaces) From b01f083ec333bf04b45d149f7d3e654967d47552 Mon Sep 17 00:00:00 2001 From: Alberto Gireud Date: Sat, 7 Nov 2015 11:48:46 -0600 Subject: [PATCH 38/64] Add openstack project module --- cloud/openstack/os_project.py | 201 ++++++++++++++++++++++++++++++++++ 1 file changed, 201 insertions(+) create mode 100644 cloud/openstack/os_project.py diff --git a/cloud/openstack/os_project.py b/cloud/openstack/os_project.py new file mode 100644 index 00000000000..c1958774976 --- /dev/null +++ b/cloud/openstack/os_project.py @@ -0,0 +1,201 @@ +#!/usr/bin/python +# Copyright (c) 2015 IBM Corporation +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . + + +try: + import shade + HAS_SHADE = True +except ImportError: + HAS_SHADE = False + +DOCUMENTATION = ''' +--- +module: os_project +short_description: Manage OpenStack Projects +extends_documentation_fragment: openstack +version_added: "2.0" +author: "Alberto Gireud (@agireud)" +description: + - Manage OpenStack Projects. Projects can be created, + updated or deleted using this module. A project will be updated + if I(name) matches an existing project and I(state) is present. 
+ The value for I(name) cannot be updated without deleting and + re-creating the project. +options: + name: + description: + - Name for the project + required: true + description: + description: + - Description for the project + required: false + default: None + domain_id: + description: + - Domain id to create the project in if the cloud supports domains + required: false + default: None + enabled: + description: + - Is the project enabled + required: false + default: True + state: + description: + - Should the resource be present or absent. + choices: [present, absent] + default: present +requirements: + - "python >= 2.6" + - "shade" +''' + +EXAMPLES = ''' +# Create a project +- os_project: + cloud: mycloud + state: present + name: demoproject + description: demodescription + domain_id: demoid + enabled: True + +# Delete a project +- os_project: + cloud: mycloud + state: absent + name: demoproject +''' + + +RETURN = ''' +project: + description: Dictionary describing the project. + returned: On success when I(state) is 'present' + type: dictionary + contains: + description: + description: Project description + type: string + sample: "demodescription" + domain_id: + description: Project domain ID. Only present with Keystone >= v3. 
+ type: string + sample: "default" + id: + description: Project ID + type: string + sample: "f59382db809c43139982ca4189404650" + name: + description: Project name + type: string + sample: "demoproject" +''' + +def _needs_update(module, project): + keys = ('description', 'enabled') + for key in keys: + if module.params[key] is not None and module.params[key] != project.get(key): + return True + + return False + +def _system_state_change(module, project): + state = module.params['state'] + if state == 'present': + if project is None: + changed = True + else: + if _needs_update(module, project): + changed = True + else: + changed = False + + elif state == 'absent': + if project is None: + changed=False + else: + changed=True + + return changed; + +def main(): + + argument_spec = openstack_full_argument_spec( + name=dict(required=True), + description=dict(required=False, default=None), + domain=dict(required=False, default=None), + enabled=dict(default=True, type='bool'), + state=dict(default='present', choices=['absent', 'present']) + ) + + module_kwargs = openstack_module_kwargs() + module = AnsibleModule( + argument_spec, + supports_check_mode=True, + **module_kwargs + ) + + if not HAS_SHADE: + module.fail_json(msg='shade is required for this module') + + name = module.params['name'] + description = module.params['description'] + domain = module.params['domain'] + enabled = module.params['enabled'] + state = module.params['state'] + + try: + cloud = shade.openstack_cloud(**module.params) + project = cloud.get_project(name) + + if module.check_mode: + module.exit_json(changed=_system_state_change(module, project)) + + if state == 'present': + if project is None: + project = cloud.create_project( + name=name, description=description, + domain_id=domain, + enabled=enabled) + changed = True + else: + if _needs_update(module, project): + project = cloud.update_project( + project['id'], description=description, + enabled=enabled) + changed = True + else: + changed = False 
+ module.exit_json(changed=changed, project=project) + + elif state == 'absent': + if project is None: + changed=False + else: + cloud.delete_project(project['id']) + changed=True + module.exit_json(changed=changed) + + except shade.OpenStackCloudException as e: + module.fail_json(msg=e.message, extra_data=e.extra_data) + +from ansible.module_utils.basic import * +from ansible.module_utils.openstack import * + +if __name__ == '__main__': + main() From e0bdd2e7f686ca0c7f382c1c2766765de216a019 Mon Sep 17 00:00:00 2001 From: Romain Brucker Date: Tue, 10 Nov 2015 09:21:32 -0600 Subject: [PATCH 39/64] Editing iptable module to use -m state --state instead of -m conntrack --ctstate --- system/iptables.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/system/iptables.py b/system/iptables.py index 3e42a711db4..b9368e0688f 100644 --- a/system/iptables.py +++ b/system/iptables.py @@ -246,7 +246,7 @@ def append_comm(rule, param): def append_conntrack(rule, param): if param: rule.extend(['-m']) - rule.extend(['conntrack']) + rule.extend(['state']) def append_limit(rule, param): if param: @@ -273,7 +273,7 @@ def construct_rule(params): append_param(rule, params['comment'], '--comment', False) if params['ctstate']: append_conntrack(rule, params['ctstate']) - append_param(rule, ','.join(params['ctstate']), '--ctstate', False) + append_param(rule, ','.join(params['ctstate']), '--state', False) append_limit(rule, params['limit']) append_param(rule, params['limit'], '--limit', False) return rule From 9f02fbe07244f37422b128caa59f82ae559edb64 Mon Sep 17 00:00:00 2001 From: Robin Roth Date: Wed, 11 Nov 2015 11:57:11 +0100 Subject: [PATCH 40/64] better cope with rpm not returning package name if the rpm query is missing a package name (or giving some error): fail soft before the patch: the module fails because the installed_state dict is missing the package name after the patch: the missing package is assumed to not be in the correct state and is installed/removed 
with zypper --- packaging/os/zypper.py | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/packaging/os/zypper.py b/packaging/os/zypper.py index b1155c6014d..0a693543d45 100644 --- a/packaging/os/zypper.py +++ b/packaging/os/zypper.py @@ -161,7 +161,7 @@ def get_package_state(m, packages): for stdoutline in stdout.splitlines(): match = rpmoutput_re.match(stdoutline) if match == None: - return None + continue package = match.group(1) result = match.group(2) if result == 'is installed': @@ -169,18 +169,13 @@ def get_package_state(m, packages): else: installed_state[package] = False - for package in packages: - if package not in installed_state: - print package + ' was not returned by rpm \n' - return None - return installed_state # Function used to make sure a package is present. def package_present(m, name, installed_state, package_type, disable_gpg_check, disable_recommends, old_zypper): packages = [] for package in name: - if installed_state[package] is False: + if package not in installed_state or installed_state[package] is False: packages.append(package) if len(packages) != 0: cmd = ['/usr/bin/zypper', '--non-interactive'] @@ -246,7 +241,7 @@ def package_latest(m, name, installed_state, package_type, disable_gpg_check, di def package_absent(m, name, installed_state, package_type, old_zypper): packages = [] for package in name: - if installed_state[package] is True: + if package not in installed_state or installed_state[package] is True: packages.append(package) if len(packages) != 0: cmd = ['/usr/bin/zypper', '--non-interactive', 'remove', '-t', package_type] From e52e015791a7c1cec3d41c25b18a4243920579b6 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 11 Nov 2015 12:38:51 -0800 Subject: [PATCH 41/64] Documentation fixes --- cloud/amazon/route53_facts.py | 16 ++++++++-------- cloud/amazon/sns_topic.py | 8 ++++---- packaging/os/homebrew.py | 2 +- 3 files changed, 13 insertions(+), 13 deletions(-) diff --git 
a/cloud/amazon/route53_facts.py b/cloud/amazon/route53_facts.py index 16034acb51a..d6081dba4da 100644 --- a/cloud/amazon/route53_facts.py +++ b/cloud/amazon/route53_facts.py @@ -50,10 +50,10 @@ options: required: false next_marker: description: - - Some requests such as list_command: hosted_zones will return a maximum + - "Some requests such as list_command: hosted_zones will return a maximum number of entries - EG 100. If the number of entries exceeds this maximum another request can be sent using the NextMarker entry from the first response - to get the next page of results + to get the next page of results" required: false delegation_set_id: description: @@ -61,8 +61,8 @@ options: required: false start_record_name: description: - - The first name in the lexicographic ordering of domain names that you want - the list_command: record_sets to start listing from + - "The first name in the lexicographic ordering of domain names that you want + the list_command: record_sets to start listing from" required: false type: description: @@ -85,9 +85,9 @@ options: required: false hosted_zone_method: description: - - This is used in conjunction with query: hosted_zone. + - "This is used in conjunction with query: hosted_zone. It allows for listing details, counts or tags of various - hosted zone details. + hosted zone details." required: false choices: [ 'details', @@ -99,9 +99,9 @@ options: default: 'list' health_check_method: description: - - This is used in conjunction with query: health_check. + - "This is used in conjunction with query: health_check. It allows for listing details, counts or tags of various - health check details. + health check details." required: false choices: [ 'list', diff --git a/cloud/amazon/sns_topic.py b/cloud/amazon/sns_topic.py index a9de7b88f10..92d63d02c18 100755 --- a/cloud/amazon/sns_topic.py +++ b/cloud/amazon/sns_topic.py @@ -34,15 +34,15 @@ options: subscriptions: description: - List of subscriptions to apply to the topic. 
Note that AWS requires - subscriptions to be confirmed, so you will need to confirm any new - subscriptions. + subscriptions to be confirmed, so you will need to confirm any new + subscriptions. purge_subscriptions: description: - - Whether to purge any subscriptions not listed here. NOTE: AWS does not + - "Whether to purge any subscriptions not listed here. NOTE: AWS does not allow you to purge any PendingConfirmation subscriptions, so if any exist and would be purged, they are silently skipped. This means that somebody could come back later and confirm the subscription. Sorry. - Blame Amazon. + Blame Amazon." default: True extends_documentation_fragment: aws requirements: [ "boto" ] diff --git a/packaging/os/homebrew.py b/packaging/os/homebrew.py index 5225e8091c5..94d0ef865c4 100644 --- a/packaging/os/homebrew.py +++ b/packaging/os/homebrew.py @@ -39,7 +39,7 @@ options: default: None path: description: - - ':' separated list of paths to search for 'brew' executable. Since A package (I(formula) in homebrew parlance) location is prefixed relative to the actual path of I(brew) command, providing an alternative I(brew) path enables managing different set of packages in an alternative location in the system. + - "':' separated list of paths to search for 'brew' executable. Since A package (I(formula) in homebrew parlance) location is prefixed relative to the actual path of I(brew) command, providing an alternative I(brew) path enables managing different set of packages in an alternative location in the system." required: false default: '/usr/local/bin' state: From 19b506f64f94b8236fe53c6cba8d0691d17dfeb4 Mon Sep 17 00:00:00 2001 From: Jonathan Mainguy Date: Thu, 12 Nov 2015 17:49:44 -0500 Subject: [PATCH 42/64] Added style= and more colors. 
--- notification/irc.py | 54 ++++++++++++++++++++++++++++++++++++++------- 1 file changed, 46 insertions(+), 8 deletions(-) diff --git a/notification/irc.py b/notification/irc.py index 28ad4417ac1..d87d26d367e 100644 --- a/notification/irc.py +++ b/notification/irc.py @@ -56,9 +56,11 @@ options: color: description: - Text color for the message. ("none" is a valid option in 1.6 or later, in 1.6 and prior, the default color is black, not "none"). + Added 11 more colors in version 2.0. required: false default: "none" - choices: [ "none", "yellow", "red", "green", "blue", "black" ] + choices: [ "none", "white", "black", "blue", "green", "red", "brown", "purple", "orange", "yellow", "light_green", "teal", "light_cyan", + "light_blue", "pink", "gray", "light_gray"] channel: description: - Channel name. One of nick_to or channel needs to be set. When both are set, the message will be sent to both of them. @@ -95,6 +97,13 @@ options: Useful for when using a faux bot and not wanting join/parts between messages. default: True version_added: "2.0" + style: + description: + - Text style for the message. 
Note italic does not work on some clients + default: None + required: False + choices: [ "bold", "underline", "reverse", "italic" ] + version_added: "2.0" # informational: requirements for nodes requirements: [ socket ] @@ -134,24 +143,47 @@ from time import sleep def send_msg(msg, server='localhost', port='6667', channel=None, nick_to=[], key=None, topic=None, - nick="ansible", color='none', passwd=False, timeout=30, use_ssl=False, part=True): + nick="ansible", color='none', passwd=False, timeout=30, use_ssl=False, part=True, style=None): '''send message to IRC''' colornumbers = { + 'white': "00", 'black': "01", + 'blue': "02", + 'green': "03", 'red': "04", - 'green': "09", + 'brown': "05", + 'purple': "06", + 'orange': "07", 'yellow': "08", - 'blue': "12", + 'light_green': "09", + 'teal': "10", + 'light_cyan': "11", + 'light_blue': "12", + 'pink': "13", + 'gray': "14", + 'light_gray': "15", + } + + stylechoices = { + 'bold': "\x02", + 'underline': "\x1F", + 'reverse': "\x16", + 'italic': "\x1D", } + try: + styletext = stylechoices[style] + except: + styletext = "" + try: colornumber = colornumbers[color] colortext = "\x03" + colornumber except: colortext = "" - message = colortext + msg + message = styletext + colortext + msg irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM) if use_ssl: @@ -219,8 +251,13 @@ def main(): nick=dict(default='ansible'), nick_to=dict(required=False, type='list'), msg=dict(required=True), - color=dict(default="none", choices=["yellow", "red", "green", - "blue", "black", "none"]), + color=dict(default="none", aliases=['colour'], choices=["white", "black", "blue", + "green", "red", "brown", + "purple", "orange", "yellow", + "light_green", "teal", "light_cyan", + "light_blue", "pink", "gray", + "light_gray", "none"]), + style=dict(default="none", choices=["underline", "reverse", "bold", "italic", "none"]), channel=dict(required=False), key=dict(), topic=dict(), @@ -248,9 +285,10 @@ def main(): timeout = module.params["timeout"] 
use_ssl = module.params["use_ssl"] part = module.params["part"] + style = module.params["style"] try: - send_msg(msg, server, port, channel, nick_to, key, topic, nick, color, passwd, timeout, use_ssl, part) + send_msg(msg, server, port, channel, nick_to, key, topic, nick, color, passwd, timeout, use_ssl, part, style) except Exception, e: module.fail_json(msg="unable to send to IRC: %s" % e) From e5362cc76a25a734ddacf4d8ac496d9127c4a46d Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 13 Nov 2015 16:47:22 -0500 Subject: [PATCH 43/64] Version bump for new beta 2.0.0-0.5.beta3 --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index f802f1a2cdb..47c909bbc53 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.0.0-0.4.beta2 +2.0.0-0.5.beta3 From 875a0551032e721f06625c237bd7c2a6fecaa7fd Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sun, 15 Nov 2015 14:31:34 -0800 Subject: [PATCH 44/64] corrected choices which was meant to be type --- monitoring/zabbix_host.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monitoring/zabbix_host.py b/monitoring/zabbix_host.py index 3cb27c5fbb9..5b6748a3e94 100644 --- a/monitoring/zabbix_host.py +++ b/monitoring/zabbix_host.py @@ -377,7 +377,7 @@ def main(): state=dict(default="present", choices=['present', 'absent']), timeout=dict(type='int', default=10), interfaces=dict(required=False), - force=dict(default='yes', choices='bool'), + force=dict(default=True, type='bool'), proxy=dict(required=False) ), supports_check_mode=True From 39a3255ef3d873e1b9974160ce211683f552173b Mon Sep 17 00:00:00 2001 From: Jonathan Davila Date: Mon, 16 Nov 2015 16:54:35 -0500 Subject: [PATCH 45/64] Sendgrid docs fix --- notification/sendgrid.py | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/notification/sendgrid.py b/notification/sendgrid.py index 2655b4248bb..1bac1e5f724 100644 --- a/notification/sendgrid.py +++ b/notification/sendgrid.py @@ 
-24,33 +24,34 @@ version_added: "2.0" module: sendgrid short_description: Sends an email with the SendGrid API description: - - Sends an email with a SendGrid account through their API, not through - the SMTP service. + - "Sends an email with a SendGrid account through their API, not through + the SMTP service." notes: - - This module is non-idempotent because it sends an email through the - external API. It is idempotent only in the case that the module fails. - - Like the other notification modules, this one requires an external + - "This module is non-idempotent because it sends an email through the + external API. It is idempotent only in the case that the module fails." + - "Like the other notification modules, this one requires an external dependency to work. In this case, you'll need an active SendGrid - account. + account." options: username: description: - username for logging into the SendGrid account + - username for logging into the SendGrid account required: true password: - description: password that corresponds to the username + description: + - password that corresponds to the username required: true from_address: description: - the address in the "from" field for the email + - the address in the "from" field for the email required: true to_addresses: description: - a list with one or more recipient email addresses + - a list with one or more recipient email addresses required: true subject: description: - the desired subject for the email + - the desired subject for the email required: true author: "Matt Makai (@makaimc)" From c9e4c32f41ee6eb5edc0a513139dcaf24353ae00 Mon Sep 17 00:00:00 2001 From: Alberto Gireud Date: Mon, 16 Nov 2015 17:31:53 -0600 Subject: [PATCH 46/64] Fix return documentation --- cloud/openstack/os_project.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/cloud/openstack/os_project.py b/cloud/openstack/os_project.py index c1958774976..37901f8412e 100644 --- a/cloud/openstack/os_project.py +++ 
b/cloud/openstack/os_project.py @@ -88,14 +88,6 @@ project: returned: On success when I(state) is 'present' type: dictionary contains: - description: - description: Project description - type: string - sample: "demodescription" - domain_id: - description: Project domain ID. Only present with Keystone >= v3. - type: string - sample: "default" id: description: Project ID type: string @@ -104,6 +96,14 @@ project: description: Project name type: string sample: "demoproject" + description: + description: Project description + type: string + sample: "demodescription" + enabled: + description: Boolean to indicate if project is enabled + type: bool + sample: True ''' def _needs_update(module, project): From c1cf8e671a5b263daa3f998360333758ece5c4e9 Mon Sep 17 00:00:00 2001 From: Xav Paice Date: Wed, 18 Nov 2015 14:33:25 +1300 Subject: [PATCH 47/64] Added stdout and stderr to puppet output for rc=2 --- system/puppet.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/puppet.py b/system/puppet.py index 48a497c37ce..ab1339ec5ba 100644 --- a/system/puppet.py +++ b/system/puppet.py @@ -197,7 +197,7 @@ def main(): error=True, stdout=stdout, stderr=stderr) elif rc == 2: # success with changes - module.exit_json(rc=0, changed=True) + module.exit_json(rc=0, changed=True, stdout=stdout, stderr=stderr) elif rc == 124: # timeout module.exit_json( From a59f1f528e67daf580c9e1859a5fb719914e7834 Mon Sep 17 00:00:00 2001 From: Hans-Joachim Kliemeck Date: Fri, 20 Nov 2015 09:08:42 +0100 Subject: [PATCH 48/64] fix race condition and missing property --- windows/win_updates.ps1 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/windows/win_updates.ps1 b/windows/win_updates.ps1 index 3d5bc4c57c4..890e3670d86 100644 --- a/windows/win_updates.ps1 +++ b/windows/win_updates.ps1 @@ -337,7 +337,7 @@ Function RunAsScheduledJob { $sw = [System.Diagnostics.Stopwatch]::StartNew() # NB: output from scheduled jobs is delayed after completion (including the sub-objects 
after the primary Output object is available) - While (($job.Output -eq $null -or -not $job.Output.Keys.Contains('job_output')) -and $sw.ElapsedMilliseconds -lt 15000) { + While (($job.Output -eq $null -or -not ($job.Output | Get-Member -Name Keys) -or -not $job.Output.Keys.Contains('job_output')) -and $sw.ElapsedMilliseconds -lt 15000) { Write-DebugLog "Waiting for job output to populate..." Start-Sleep -Milliseconds 500 } From a56fe04683031e1eec0414c1726df4704c7ff79f Mon Sep 17 00:00:00 2001 From: Ryan Sydnor Date: Fri, 20 Nov 2015 13:55:44 -0500 Subject: [PATCH 49/64] Use boto normalized location for bucket creation If a bucket is being created in us-east-1, the module passed 'us-east-1' to boto's s3.create_bucket method rather than Location.DEFAULT (an empty string). This caused boto to generate invalid XML which AWS was unable to interpret. --- cloud/amazon/s3_bucket.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/cloud/amazon/s3_bucket.py b/cloud/amazon/s3_bucket.py index aa6cc9d1e41..22e68927016 100644 --- a/cloud/amazon/s3_bucket.py +++ b/cloud/amazon/s3_bucket.py @@ -129,11 +129,10 @@ def create_tags_container(tags): tags_obj.add_tag_set(tag_set) return tags_obj -def create_bucket(connection, module): +def create_bucket(connection, module, location): policy = module.params.get("policy") name = module.params.get("name") - region = module.params.get("region") requester_pays = module.params.get("requester_pays") tags = module.params.get("tags") versioning = module.params.get("versioning") @@ -143,7 +142,7 @@ def create_bucket(connection, module): bucket = connection.get_bucket(name) except S3ResponseError, e: try: - bucket = connection.create_bucket(name, location=region) + bucket = connection.create_bucket(name, location=location) changed = True except S3CreateError, e: module.fail_json(msg=e.message) @@ -376,7 +375,7 @@ def main(): state = module.params.get("state") if state == 'present': - create_bucket(connection, module) + 
create_bucket(connection, module, location) elif state == 'absent': destroy_bucket(connection, module) From 66964f660aa62bff95fc0a2ab2444a2110eabd4b Mon Sep 17 00:00:00 2001 From: Olaf Kilian Date: Fri, 20 Nov 2015 20:25:50 +0100 Subject: [PATCH 50/64] Set no_log for password argument --- cloud/docker/docker_login.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/docker/docker_login.py b/cloud/docker/docker_login.py index c00dc3f900d..4fc4decbe62 100644 --- a/cloud/docker/docker_login.py +++ b/cloud/docker/docker_login.py @@ -220,7 +220,7 @@ def main(): argument_spec = dict( registry = dict(required=False, default='https://index.docker.io/v1/'), username = dict(required=True), - password = dict(required=True), + password = dict(required=True, no_log=True), email = dict(required=False, default=None), reauth = dict(required=False, default=False, type='bool'), dockercfg_path = dict(required=False, default='~/.docker/config.json'), From c629d5b0139c0dcd680af273745ab59ff30b19c0 Mon Sep 17 00:00:00 2001 From: Olaf Kilian Date: Fri, 20 Nov 2015 21:05:19 +0100 Subject: [PATCH 51/64] Add requirement and check for compatible version of docker-py --- cloud/docker/docker_login.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/cloud/docker/docker_login.py b/cloud/docker/docker_login.py index 4fc4decbe62..cdc1f95d042 100644 --- a/cloud/docker/docker_login.py +++ b/cloud/docker/docker_login.py @@ -70,7 +70,7 @@ options: required: false default: 600 -requirements: [ "python >= 2.6", "docker-py" ] +requirements: [ "python >= 2.6", "docker-py >= 1.1.0" ] ''' EXAMPLES = ''' @@ -102,11 +102,16 @@ import os.path import json import base64 from urlparse import urlparse +from distutils.version import StrictVersion try: import docker.client from docker.errors import APIError as DockerAPIError has_lib_docker = True + if StrictVersion(docker.__version__) >= StrictVersion("1.1.0"): + has_correct_lib_docker_version = True + else: + 
has_correct_lib_docker_version = False except ImportError, e: has_lib_docker = False @@ -231,7 +236,10 @@ def main(): ) if not has_lib_docker: - module.fail_json(msg="python library docker-py required: pip install docker-py==1.1.0") + module.fail_json(msg="python library docker-py required: pip install docker-py>=1.1.0") + + if not has_correct_lib_docker_version: + module.fail_json(msg="your version of docker-py is outdated: pip install docker-py>=1.1.0") if not has_lib_requests_execeptions: module.fail_json(msg="python library requests required: pip install requests") From 19374903ac679ee100a146f0615b92517020193b Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 20 Nov 2015 12:36:36 -0800 Subject: [PATCH 52/64] Switch StrictVersion for LooseVersion since some distros ship beta versions and StrictVersion would fail on that. Also clean up some minor style things --- cloud/docker/docker_login.py | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/cloud/docker/docker_login.py b/cloud/docker/docker_login.py index cdc1f95d042..b2117464fd6 100644 --- a/cloud/docker/docker_login.py +++ b/cloud/docker/docker_login.py @@ -102,24 +102,25 @@ import os.path import json import base64 from urlparse import urlparse -from distutils.version import StrictVersion +from distutils.version import LooseVersion try: import docker.client from docker.errors import APIError as DockerAPIError has_lib_docker = True - if StrictVersion(docker.__version__) >= StrictVersion("1.1.0"): + if LooseVersion(docker.__version__) >= LooseVersion("1.1.0"): has_correct_lib_docker_version = True else: has_correct_lib_docker_version = False -except ImportError, e: +except ImportError: has_lib_docker = False try: - from requests.exceptions import * - has_lib_requests_execeptions = True -except ImportError, e: - has_lib_requests_execeptions = False + import requests + has_lib_requests = True +except ImportError: + has_lib_requests = False + class 
DockerLoginManager: @@ -161,7 +162,7 @@ class DockerLoginManager: self.module.fail_json(msg="failed to login to the remote registry", error=repr(e)) # Get status from registry response. - if self.response.has_key("Status"): + if "Status" in self.response: self.log.append(self.response["Status"]) # Update the dockercfg if not in check mode. @@ -186,9 +187,9 @@ class DockerLoginManager: docker_config = json.load(open(self.dockercfg_path, "r")) except ValueError: docker_config = dict() - if not docker_config.has_key("auths"): + if "auths" not in docker_config: docker_config["auths"] = dict() - if not docker_config["auths"].has_key(self.registry): + if self.registry not in docker_config["auths"]: docker_config["auths"][self.registry] = dict() # Calculate docker credentials based on current parameters. @@ -219,6 +220,7 @@ class DockerLoginManager: def has_changed(self): return self.changed + def main(): module = AnsibleModule( @@ -241,7 +243,7 @@ def main(): if not has_correct_lib_docker_version: module.fail_json(msg="your version of docker-py is outdated: pip install docker-py>=1.1.0") - if not has_lib_requests_execeptions: + if not has_lib_requests: module.fail_json(msg="python library requests required: pip install requests") try: From b87e3ce36258e9c7d5184be2af9cf2e4aa73574f Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 20 Nov 2015 13:57:58 -0800 Subject: [PATCH 53/64] Quote strings that make the module docs fail to build --- cloud/docker/docker_login.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/docker/docker_login.py b/cloud/docker/docker_login.py index b2117464fd6..05fac1dd5d0 100644 --- a/cloud/docker/docker_login.py +++ b/cloud/docker/docker_login.py @@ -33,9 +33,9 @@ description: options: registry: description: - - URL of the registry, defaults to: https://index.docker.io/v1/ + - "URL of the registry, defaults to: https://index.docker.io/v1/" required: false - default: https://index.docker.io/v1/ + default: 
"https://index.docker.io/v1/" username: description: - The username for the registry account From b9bb6d8d29fbd2b6ffc0548ec93f13411d273bcb Mon Sep 17 00:00:00 2001 From: Jiri tyr Date: Tue, 22 Sep 2015 22:27:27 +0100 Subject: [PATCH 54/64] Adding yumrepo module This patch is adding a new module which allows to add and remove YUM repository definitions. The module implements all repository options as described in the `yum.conf` manual page. --- packaging/os/yumrepo.py | 560 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 560 insertions(+) create mode 100644 packaging/os/yumrepo.py diff --git a/packaging/os/yumrepo.py b/packaging/os/yumrepo.py new file mode 100644 index 00000000000..e2052cea191 --- /dev/null +++ b/packaging/os/yumrepo.py @@ -0,0 +1,560 @@ +#!/usr/bin/python +# encoding: utf-8 + +# (c) 2015, Jiri Tyr +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + + +import ConfigParser +import os + + +DOCUMENTATION = ''' +--- +module: yumrepo +author: Jiri Tyr (@jtyr) +version_added: '2.0' +short_description: Add and remove YUM repositories +description: + - Add or remove YUM repositories in RPM-based Linux distributions. + +options: + bandwidth: + required: false + default: 0 + description: + - Maximum available network bandwidth in bytes/second. Used with the + I(throttle) option. 
+      - If I(throttle) is a percentage and bandwidth is C(0) then bandwidth
+        throttling will be disabled. If I(throttle) is expressed as a data rate
+        (bytes/sec) then this option is ignored. Default is C(0) (no bandwidth
+        throttling).
+  baseurl:
+    required: false
+    default: None
+    description:
+      - URL to the directory where the yum repository's 'repodata' directory
+        lives.
+      - This or the I(mirrorlist) parameter is required.
+  cost:
+    required: false
+    default: 1000
+    description:
+      - Relative cost of accessing this repository. Useful for weighing one
+        repo's packages as greater/less than any other.
+  description:
+    required: false
+    default: None
+    description:
+      - A human readable string describing the repository.
+  enabled:
+    required: false
+    choices: ['yes', 'no']
+    default: 'yes'
+    description:
+      - This tells yum whether or not to use this repository.
+  enablegroups:
+    required: false
+    choices: ['yes', 'no']
+    default: 'yes'
+    description:
+      - Determines whether yum will allow the use of package groups for this
+        repository.
+  exclude:
+    required: false
+    default: None
+    description:
+      - List of packages to exclude from updates or installs. This should be a
+        space separated list. Shell globs using wildcards (eg. C(*) and C(?))
+        are allowed.
+      - The list can also be a regular YAML array.
+  failovermethod:
+    required: false
+    choices: [roundrobin, priority]
+    default: roundrobin
+    description:
+      - C(roundrobin) randomly selects a URL out of the list of URLs to start
+        with and proceeds through each of them as it encounters a failure
+        contacting the host.
+      - C(priority) starts from the first baseurl listed and reads through them
+        sequentially.
+  file:
+    required: false
+    default: None
+    description:
+      - File to use to save the repo in. Defaults to the value of I(name).
+  gpgcakey:
+    required: false
+    default: None
+    description:
+      - A URL pointing to the ASCII-armored CA key file for the repository.
+ gpgcheck: + required: false + choices: ['yes', 'no'] + default: 'no' + description: + - Tells yum whether or not it should perform a GPG signature check on + packages. + gpgkey: + required: false + default: None + description: + - A URL pointing to the ASCII-armored GPG key file for the repository. + http_caching: + required: false + choices: [all, packages, none] + default: all + description: + - Determines how upstream HTTP caches are instructed to handle any HTTP + downloads that Yum does. + - C(all) means that all HTTP downloads should be cached. + - C(packages) means that only RPM package downloads should be cached (but + not repository metadata downloads). + - C(none) means that no HTTP downloads should be cached. + includepkgs: + required: false + default: None + description: + - List of packages you want to only use from a repository. This should be + a space separated list. Shell globs using wildcards (eg. C(*) and C(?)) + are allowed. Substitution variables (e.g. C($releasever)) are honored + here. + - The list can also be a regular YAML array. + keepalive: + required: false + choices: ['yes', 'no'] + default: 'no' + description: + - This tells yum whether or not HTTP/1.1 keepalive should be used with + this repository. This can improve transfer speeds by using one + connection when downloading multiple files from a repository. + metadata_expire: + required: false + default: 21600 + description: + - Time (in seconds) after which the metadata will expire. + - Default value is 6 hours. + metalink: + required: false + default: None + description: + - Specifies a URL to a metalink file for the repomd.xml, a list of + mirrors for the entire repository are generated by converting the + mirrors for the repomd.xml file to a baseurl. + mirrorlist: + required: false + default: None + description: + - Specifies a URL to a file containing a list of baseurls. + - This or the I(baseurl) parameter is required. 
+  mirrorlist_expire:
+    required: false
+    default: 21600
+    description:
+      - Time (in seconds) after which the locally cached mirrorlist will
+        expire.
+      - Default value is 6 hours.
+  name:
+    required: true
+    description:
+      - Unique repository ID.
+  password:
+    required: false
+    default: None
+    description:
+      - Password to use with the username for basic authentication.
+  protect:
+    required: false
+    choices: ['yes', 'no']
+    default: 'no'
+    description:
+      - Protect packages from updates from other repositories.
+  proxy:
+    required: false
+    default: None
+    description:
+      - URL to the proxy server that yum should use.
+  proxy_password:
+    required: false
+    default: None
+    description:
+      - Password to use for the proxy.
+  proxy_username:
+    required: false
+    default: None
+    description:
+      - Username to use for the proxy.
+  repo_gpgcheck:
+    required: false
+    choices: ['yes', 'no']
+    default: 'no'
+    description:
+      - This tells yum whether or not it should perform a GPG signature check
+        on the repodata from this repository.
+  reposdir:
+    required: false
+    default: /etc/yum.repos.d
+    description:
+      - Directory where the C(.repo) files will be stored.
+  retries:
+    required: false
+    default: 10
+    description:
+      - Set the number of times any attempt to retrieve a file should retry
+        before returning an error. Setting this to C(0) makes yum try forever.
+  skip_if_unavailable:
+    required: false
+    choices: ['yes', 'no']
+    default: 'no'
+    description:
+      - If set to C(yes) yum will continue running if this repository cannot be
+        contacted for any reason. This should be set carefully as all repos are
+        consulted for any given command.
+  sslcacert:
+    required: false
+    default: None
+    description:
+      - Path to the directory containing the databases of the certificate
+        authorities yum should use to verify SSL certificates.
+ ssl_check_cert_permissions: + required: false + choices: ['yes', 'no'] + default: 'no' + description: + - Whether yum should check the permissions on the paths for the + certificates on the repository (both remote and local). + - If we can't read any of the files then yum will force + I(skip_if_unavailable) to be true. This is most useful for non-root + processes which use yum on repos that have client cert files which are + readable only by root. + sslclientcert: + required: false + default: None + description: + - Path to the SSL client certificate yum should use to connect to + repos/remote sites. + sslclientkey: + required: false + default: None + description: + - Path to the SSL client key yum should use to connect to repos/remote + sites. + sslverify: + required: false + choices: ['yes', 'no'] + default: 'yes' + description: + - Defines whether yum should verify SSL certificates/hosts at all. + state: + required: false + choices: [absent, present] + default: present + description: + - A source string state. + throttle: + required: false + default: None + description: + - Enable bandwidth throttling for downloads. + - This option can be expressed as a absolute data rate in bytes/sec. An + SI prefix (k, M or G) may be appended to the bandwidth value. + timeout: + required: false + default: 30 + description: + - Number of seconds to wait for a connection before timing out. + username: + required: false + default: None + description: + - Username to use for basic authentication to a repo or really any url. + +extends_documentation_fragment: files + +notes: + - All comments will be removed if modifying an existing repo file. + - Section order is preserved in an existing repo file. + - Parameters in a section are ordered alphabetically in an existing repo + file. + - The repo file will be automatically deleted if it contains no repository. 
+''' + +EXAMPLES = ''' +- name: Add repository + yumrepo: + name: epel + description: EPEL YUM repo + baseurl: http://download.fedoraproject.org/pub/epel/$releasever/$basearch/ + +- name: Add multiple repositories into the same file (1/2) + yumrepo: + name: epel + description: EPEL YUM repo + file: external_repos + baseurl: http://download.fedoraproject.org/pub/epel/$releasever/$basearch/ + gpgcheck: no +- name: Add multiple repositories into the same file (2/2) + yumrepo: + name: rpmforge + description: RPMforge YUM repo + file: external_repos + baseurl: http://apt.sw.be/redhat/el7/en/$basearch/rpmforge + mirrorlist: http://mirrorlist.repoforge.org/el7/mirrors-rpmforge + enabled: no + +- name: Remove repository + yumrepo: + name: epel + state: absent + +- name: Remove repository from a specific repo file + yumrepo: + name: epel + file: external_repos + state: absent +''' + +RETURN = ''' +repo: + description: repository name + returned: success + type: string + sample: "epel" +state: + description: state of the target, after execution + returned: success + type: string + sample: "present" +''' + + +class YumRepo(object): + # Class global variables + module = None + params = None + section = None + repofile = ConfigParser.RawConfigParser() + + # List of parameters which will be allowed in the repo file output + allowed_params = [ + 'bandwidth', 'baseurl', 'cost', 'enabled', 'enablegroups', 'exclude', + 'failovermethod', 'gpgcakey', 'gpgcheck', 'gpgkey', 'http_caching', + 'includepkgs', 'keepalive', 'metadata_expire', 'metalink', + 'mirrorlist', 'mirrorlist_expire', 'name', 'password', 'protect', + 'proxy', 'proxy_password', 'proxy_username', 'repo_gpgcheck', + 'retries', 'skip_if_unavailable', 'sslcacert', + 'ssl_check_cert_permissions', 'sslclientcert', 'sslclientkey', + 'sslverify', 'throttle', 'timeout', 'username'] + + # List of parameters which can be a list + list_params = ['exclude', 'includepkgs'] + + def __init__(self, module): + # To be able to use 
fail_json + self.module = module + # Shortcut for the params + self.params = self.module.params + # Section is always the repoid + self.section = self.params['repoid'] + + # Check if repo directory exists + repos_dir = self.params['reposdir'] + if not os.path.isdir(repos_dir): + self.module.fail_json( + msg='Repo directory "%s" does not exist.' % repos_dir) + + # Get the given or the default repo file name + repo_file = self.params['repoid'] + if self.params['file'] is not None: + repo_file = self.params['file'] + + # Set dest; also used to set dest parameter for the FS attributes + self.params['dest'] = os.path.join(repos_dir, "%s.repo" % repo_file) + + # Read the repo file if it exists + if os.path.isfile(self.params['dest']): + self.repofile.read(self.params['dest']) + + def add(self): + # Remove already existing repo and create a new one + if self.repofile.has_section(self.section): + self.repofile.remove_section(self.section) + + # Add section + self.repofile.add_section(self.section) + + # Baseurl/mirrorlist is not required because for removal we need only + # the repo name. This is why we check if the baseurl/mirrorlist is + # defined. 
+ if (self.params['baseurl'], self.params['mirrorlist']) == (None, None): + self.module.fail_json( + msg='Paramater "baseurl" or "mirrorlist" is required for ' + 'adding a new repo.') + + # Set options + for key, value in sorted(self.params.items()): + if key in self.list_params and isinstance(value, list): + # Join items into one string for specific parameters + value = ' '.join(value) + elif isinstance(value, bool): + # Convert boolean value to integer + value = int(value) + + # Set the value only if it was defined (default is None) + if value is not None and key in self.allowed_params: + self.repofile.set(self.section, key, value) + + def save(self): + if len(self.repofile.sections()): + # Write data into the file + try: + fd = open(self.params['dest'], 'wb') + except IOError: + self.module.fail_json( + msg='Cannot open repo file %s.' % + self.params['dest']) + + try: + try: + self.repofile.write(fd) + except Error: + self.module.fail_json( + msg='Cannot write repo file %s.' % + self.params['dest']) + finally: + fd.close() + else: + # Remove the file if there are not repos + try: + os.remove(self.params['dest']) + except OSError: + self.module.fail_json( + msg='Cannot remove empty repo file %s.' 
% + self.params['dest']) + + def remove(self): + # Remove section if exists + if self.repofile.has_section(self.section): + self.repofile.remove_section(self.section) + + def dump(self): + repo_string = "" + + # Compose the repo file + for section in sorted(self.repofile.sections()): + repo_string += "[%s]\n" % section + + for key, value in sorted(self.repofile.items(section)): + repo_string += "%s = %s\n" % (key, value) + + repo_string += "\n" + + return repo_string + + +def main(): + # Module settings + module = AnsibleModule( + argument_spec=dict( + bandwidth=dict(), + baseurl=dict(), + cost=dict(), + description=dict(), + enabled=dict(type='bool'), + enablegroups=dict(type='bool'), + exclude=dict(), + failovermethod=dict(choices=['roundrobin', 'priority']), + file=dict(), + gpgcakey=dict(), + gpgcheck=dict(type='bool'), + gpgkey=dict(), + http_caching=dict(choices=['all', 'packages', 'none']), + includepkgs=dict(), + keepalive=dict(type='bool'), + metadata_expire=dict(), + metalink=dict(), + mirrorlist=dict(), + mirrorlist_expire=dict(), + name=dict(required=True), + password=dict(no_log=True), + protect=dict(type='bool'), + proxy=dict(), + proxy_password=dict(no_log=True), + proxy_username=dict(), + repo_gpgcheck=dict(type='bool'), + reposdir=dict(default='/etc/yum.repos.d'), + retries=dict(), + skip_if_unavailable=dict(type='bool'), + sslcacert=dict(), + ssl_check_cert_permissions=dict(type='bool'), + sslclientcert=dict(), + sslclientkey=dict(), + sslverify=dict(type='bool'), + state=dict(choices=['present', 'absent'], default='present'), + throttle=dict(), + timeout=dict(), + username=dict(), + ), + add_file_common_args=True, + supports_check_mode=True, + ) + + name = module.params['name'] + state = module.params['state'] + + # Rename "name" and "description" to ensure correct key sorting + module.params['repoid'] = module.params['name'] + module.params['name'] = module.params['description'] + del module.params['description'] + + # Instantiate the YumRepo 
object + yumrepo = YumRepo(module) + + # Get repo status before change + yumrepo_before = yumrepo.dump() + + # Perform action depending on the state + if state == 'present': + yumrepo.add() + elif state == 'absent': + yumrepo.remove() + + # Get repo status after change + yumrepo_after = yumrepo.dump() + + # Compare repo states + changed = yumrepo_before != yumrepo_after + + # Save the file only if not in check mode and if there was a change + if not module.check_mode and changed: + yumrepo.save() + + # Change file attributes if needed + if os.path.isfile(module.params['dest']): + file_args = module.load_file_common_arguments(module.params) + changed = module.set_fs_attributes_if_different(file_args, changed) + + # Print status of the change + module.exit_json(changed=changed, repo=name, state=state) + + +# Import module snippets +from ansible.module_utils.basic import * + + +if __name__ == '__main__': + main() From 367b88a2ab19d28e42ffffeb4702badebc09dbcb Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 21 Nov 2015 16:07:12 -0800 Subject: [PATCH 55/64] removed json import --- system/puppet.py | 1 - 1 file changed, 1 deletion(-) diff --git a/system/puppet.py b/system/puppet.py index 48a497c37ce..2cb82f8f85a 100644 --- a/system/puppet.py +++ b/system/puppet.py @@ -15,7 +15,6 @@ # You should have received a copy of the GNU General Public License # along with this software. If not, see . 
-import json import os import pipes import stat From c978f4b33236f7b95da80fdac084ff12f6d8879e Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 24 Nov 2015 09:55:13 -0800 Subject: [PATCH 56/64] updated version added --- packaging/os/yumrepo.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/packaging/os/yumrepo.py b/packaging/os/yumrepo.py index e2052cea191..ba4aaa2ae8f 100644 --- a/packaging/os/yumrepo.py +++ b/packaging/os/yumrepo.py @@ -27,7 +27,7 @@ DOCUMENTATION = ''' --- module: yumrepo author: Jiri Tyr (@jtyr) -version_added: '2.0' +version_added: '2.1' short_description: Add and remove YUM repositories description: - Add or remove YUM repositories in RPM-based Linux distributions. @@ -283,7 +283,8 @@ options: description: - Username to use for basic authentication to a repo or really any url. -extends_documentation_fragment: files +extends_documentation_fragment: + - files notes: - All comments will be removed if modifying an existing repo file. From 87065005aa915e1a5d8ee286f431be120f48726e Mon Sep 17 00:00:00 2001 From: Rene Moser Date: Mon, 16 Nov 2015 18:48:21 +0100 Subject: [PATCH 57/64] cloudstack: new module cs_volume --- cloud/cloudstack/cs_volume.py | 467 ++++++++++++++++++++++++++++++++++ 1 file changed, 467 insertions(+) create mode 100644 cloud/cloudstack/cs_volume.py diff --git a/cloud/cloudstack/cs_volume.py b/cloud/cloudstack/cs_volume.py new file mode 100644 index 00000000000..30548555587 --- /dev/null +++ b/cloud/cloudstack/cs_volume.py @@ -0,0 +1,467 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2015, Jefferson Girão +# (c) 2015, René Moser +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: cs_volume +short_description: Manages volumes on Apache CloudStack based clouds. +description: + - Create, destroy, attach, detach volumes. +version_added: "2.1" +author: + - "Jefferson Girão (@jeffersongirao)" + - "René Moser (@resmo)" +options: + name: + description: + - Name of the volume. + - C(name) can only contain ASCII letters. + required: true + account: + description: + - Account the volume is related to. + required: false + default: null + custom_id: + description: + - Custom id to the resource. + - Allowed to Root Admins only. + required: false + default: null + disk_offering: + description: + - Name of the disk offering to be used. + - Required one of C(disk_offering), C(snapshot) if volume is not already C(state=present). + required: false + default: null + display_volume: + description: + - Whether to display the volume to the end user or not. + - Allowed to Root Admins only. + required: false + default: true + domain: + description: + - Name of the domain the volume to be deployed in. + required: false + default: null + max_iops: + description: + - Max iops + required: false + default: null + min_iops: + description: + - Min iops + required: false + default: null + project: + description: + - Name of the project the volume to be deployed in. + required: false + default: null + size: + description: + - Size of disk in GB + required: false + default: null + snapshot: + description: + - The snapshot name for the disk volume. + - Required one of C(disk_offering), C(snapshot) if volume is not already C(state=present). 
+    required: false
+    default: null
+  force:
+    description:
+      - Force removal of volume even if it is attached to a VM.
+      - Considered on C(state=absent) only.
+    required: false
+    default: false
+  vm:
+    description:
+      - Name of the virtual machine to attach the volume to.
+    required: false
+    default: null
+  zone:
+    description:
+      - Name of the zone in which the volume should be deployed.
+      - If not set, default zone is used.
+    required: false
+    default: null
+  state:
+    description:
+      - State of the volume.
+    required: false
+    default: 'present'
+    choices: [ 'present', 'absent', 'attached', 'detached' ]
+  poll_async:
+    description:
+      - Poll async jobs until job has finished.
+    required: false
+    default: true
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+# Create volume within project, zone with specified storage options
+- local_action:
+    module: cs_volume
+    name: web-vm-1-volume
+    project: Integration
+    zone: ch-zrh-ix-01
+    disk_offering: PerfPlus Storage
+    size: 20
+
+# Create/attach volume to instance
+- local_action:
+    module: cs_volume
+    name: web-vm-1-volume
+    disk_offering: PerfPlus Storage
+    size: 20
+    vm: web-vm-1
+    state: attached
+
+# Detach volume
+- local_action:
+    module: cs_volume
+    name: web-vm-1-volume
+    state: detached
+
+# Remove volume
+- local_action:
+    module: cs_volume
+    name: web-vm-1-volume
+    state: absent
+'''
+
+RETURN = '''
+id:
+  description: ID of the volume.
+  returned: success
+  type: string
+  sample:
+name:
+  description: Name of the volume.
+  returned: success
+  type: string
+  sample: web-volume-01
+display_name:
+  description: Display name of the volume.
+ returned: success + type: string + sample: web-volume-01 +group: + description: Group the volume belongs to + returned: success + type: string + sample: web +domain: + description: Domain the volume belongs to + returned: success + type: string + sample: example domain +project: + description: Project the volume belongs to + returned: success + type: string + sample: Production +zone: + description: Name of zone the volume is in. + returned: success + type: string + sample: ch-gva-2 +created: + description: Date of the volume was created. + returned: success + type: string + sample: 2014-12-01T14:57:57+0100 +attached: + description: Date of the volume was attached. + returned: success + type: string + sample: 2014-12-01T14:57:57+0100 +type: + description: Disk volume type. + returned: success + type: string + sample: DATADISK +size: + description: Size of disk volume. + returned: success + type: string + sample: 20 +vm: + description: Name of the vm the volume is attached to (not returned when detached) + returned: success + type: string + sample: web-01 +state: + description: State of the volume + returned: success + type: string + sample: Attached +device_id: + description: Id of the device on user vm the volume is attached to (not returned when detached) + returned: success + type: string + sample: 1 +''' + +try: + from cs import CloudStack, CloudStackException, read_config + has_lib_cs = True +except ImportError: + has_lib_cs = False + +# import cloudstack common +from ansible.module_utils.cloudstack import * + + +class AnsibleCloudStackVolume(AnsibleCloudStack): + + def __init__(self, module): + super(AnsibleCloudStackVolume, self).__init__(module) + self.returns = { + 'group': 'group', + 'attached': 'attached', + 'vmname': 'vm', + 'deviceid': 'device_id', + 'type': 'type', + 'size': 'size', + } + self.volume = None + + #TODO implement in cloudstack utils + def get_disk_offering(self, key=None): + disk_offering = self.module.params.get('disk_offering') + if 
not disk_offering: + return None + + args = {} + args['domainid'] = self.get_domain(key='id') + + disk_offerings = self.cs.listDiskOfferings(**args) + if disk_offerings: + for d in disk_offerings['diskoffering']: + if disk_offering in [d['displaytext'], d['name'], d['id']]: + return self._get_by_key(key, d) + self.module.fail_json(msg="Disk offering '%s' not found" % disk_offering) + + + def get_volume(self): + if not self.volume: + args = {} + args['account'] = self.get_account(key='name') + args['domainid'] = self.get_domain(key='id') + args['projectid'] = self.get_project(key='id') + args['type'] = 'DATADISK' + + volumes = self.cs.listVolumes(**args) + if volumes: + volume_name = self.module.params.get('name') + for v in volumes['volume']: + if volume_name.lower() == v['name'].lower(): + self.volume = v + break + return self.volume + + + def get_snapshot(self, key=None): + snapshot = self.module.params.get('snapshot') + if not snapshot: + return None + + args = {} + args['name'] = snapshot + args['account'] = self.get_account('name') + args['domainid'] = self.get_domain('id') + args['projectid'] = self.get_project('id') + + snapshots = self.cs.listSnapshots(**args) + if snapshots: + return self._get_by_key(key, snapshots['snapshot'][0]) + self.module.fail_json(msg="Snapshot with name %s not found" % snapshot) + + + def present_volume(self): + volume = self.get_volume() + if not volume: + disk_offering_id = self.get_disk_offering(key='id') + snapshot_id = self.get_snapshot(key='id') + + if not disk_offering_id and not snapshot_id: + self.module.fail_json(msg="Required one of: disk_offering,snapshot") + + self.result['changed'] = True + + args = {} + args['name'] = self.module.params.get('name') + args['account'] = self.get_account(key='name') + args['domainid'] = self.get_domain(key='id') + args['diskofferingid'] = disk_offering_id + args['displayvolume'] = self.module.params.get('display_volume') + args['maxiops'] = self.module.params.get('max_iops') + 
args['miniops'] = self.module.params.get('min_iops') + args['projectid'] = self.get_project(key='id') + args['size'] = self.module.params.get('size') + args['snapshotid'] = snapshot_id + args['zoneid'] = self.get_zone(key='id') + + if not self.module.check_mode: + res = self.cs.createVolume(**args) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + poll_async = self.module.params.get('poll_async') + if poll_async: + volume = self.poll_job(res, 'volume') + return volume + + + def attached_volume(self): + volume = self.present_volume() + + if volume.get('virtualmachineid') != self.get_vm(key='id'): + self.result['changed'] = True + + if not self.module.check_mode: + volume = self.detached_volume() + + if 'attached' not in volume: + self.result['changed'] = True + + args = {} + args['id'] = volume['id'] + args['virtualmachineid'] = self.get_vm(key='id') + args['deviceid'] = self.module.params.get('device_id') + + if not self.module.check_mode: + res = self.cs.attachVolume(**args) + if 'errortext' in res: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + poll_async = self.module.params.get('poll_async') + if poll_async: + volume = self.poll_job(res, 'volume') + return volume + + + def detached_volume(self): + volume = self.present_volume() + + if volume: + if 'attached' not in volume: + return volume + + self.result['changed'] = True + + if not self.module.check_mode: + res = self.cs.detachVolume(id=volume['id']) + if 'errortext' in volume: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + poll_async = self.module.params.get('poll_async') + if poll_async: + volume = self.poll_job(res, 'volume') + return volume + + + def absent_volume(self): + volume = self.get_volume() + + if volume: + if 'attached' in volume: + if self.module.param.get('force'): + self.detached_volume() + else: + self.module.fail_json(msg="Volume '%s' is attached, use force=true for detaching and removing the volume." 
% volume.get('name')) + + self.result['changed'] = True + if not self.module.check_mode: + volume = self.detached_volume() + + res = self.cs.deleteVolume(id=volume['id']) + if 'errortext' in volume: + self.module.fail_json(msg="Failed: '%s'" % res['errortext']) + poll_async = self.module.params.get('poll_async') + if poll_async: + res = self.poll_job(res, 'volume') + + return volume + + +def main(): + argument_spec = cs_argument_spec() + argument_spec.update(dict( + name = dict(required=True), + disk_offering = dict(default=None), + display_volume = dict(choices=BOOLEANS, default=True), + max_iops = dict(type='int', default=None), + min_iops = dict(type='int', default=None), + size = dict(type='int', default=None), + snapshot = dict(default=None), + vm = dict(default=None), + device_id = dict(type='int', default=None), + custom_id = dict(default=None), + force = dict(choices=BOOLEANS, default=False), + state = dict(choices=['present', 'absent', 'attached', 'detached'], default='present'), + zone = dict(default=None), + domain = dict(default=None), + account = dict(default=None), + project = dict(default=None), + poll_async = dict(choices=BOOLEANS, default=True), + )) + + module = AnsibleModule( + argument_spec=argument_spec, + required_together=cs_required_together(), + mutually_exclusive = ( + ['snapshot', 'disk_offering'], + ), + supports_check_mode=True + ) + + if not has_lib_cs: + module.fail_json(msg="python library cs required: pip install cs") + + try: + acs_vol = AnsibleCloudStackVolume(module) + + state = module.params.get('state') + + if state in ['absent']: + volume = acs_vol.absent_volume() + elif state in ['attached']: + volume = acs_vol.attached_volume() + elif state in ['detached']: + volume = acs_vol.detached_volume() + else: + volume = acs_vol.present_volume() + + result = acs_vol.get_result(volume) + + except CloudStackException, e: + module.fail_json(msg='CloudStackException: %s' % str(e)) + + module.exit_json(**result) + +# import module 
snippets +from ansible.module_utils.basic import * + +if __name__ == '__main__': + main() From 42efef5682143e654748b6d8bec0589ba8b9a3de Mon Sep 17 00:00:00 2001 From: Adam Keech Date: Wed, 25 Nov 2015 11:41:02 -0500 Subject: [PATCH 58/64] Appending "Registry::" is not needed and no longer works. --- windows/win_regedit.ps1 | 2 -- 1 file changed, 2 deletions(-) diff --git a/windows/win_regedit.ps1 b/windows/win_regedit.ps1 index ee92e781d0c..f9491e39c57 100644 --- a/windows/win_regedit.ps1 +++ b/windows/win_regedit.ps1 @@ -31,8 +31,6 @@ $state = Get-Attr -obj $params -name "state" -validateSet "present","absent" -de $registryData = Get-Attr -obj $params -name "data" -default $null $registryDataType = Get-Attr -obj $params -name "datatype" -validateSet "binary","dword","expandstring","multistring","string","qword" -default "string" -$registryKey = "Registry::" + $registryKey - If ($state -eq "present" -and $registryData -eq $null -and $registryValue -ne $null) { Fail-Json $result "missing required argument: data" From c28a0031bbaf20b6fae0e2ccc9820698861ff045 Mon Sep 17 00:00:00 2001 From: GUILLAUME GROSSETIE Date: Thu, 26 Nov 2015 11:37:59 +0100 Subject: [PATCH 59/64] Resolves #1290, Adds limit_type choice "-" --- system/pam_limits.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/pam_limits.py b/system/pam_limits.py index eb04021c3e0..4003f76d3f8 100644 --- a/system/pam_limits.py +++ b/system/pam_limits.py @@ -40,7 +40,7 @@ options: description: - Limit type, see C(man limits) for an explanation required: true - choices: [ "hard", "soft" ] + choices: [ "hard", "soft", "-" ] limit_item: description: - The limit to be set From 3c4f954f0fece5dcb3241d6d5391273334206241 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 30 Nov 2015 19:01:57 -0800 Subject: [PATCH 60/64] Don't raise or catch StandardError in amazon modules --- cloud/amazon/dynamodb_table.py | 3 +- cloud/amazon/ec2_elb_facts.py | 5 +- cloud/amazon/ec2_eni.py | 99 
+++++++++++------------ cloud/amazon/ec2_eni_facts.py | 2 +- cloud/amazon/ec2_remote_facts.py | 33 ++++---- cloud/amazon/ec2_vpc_igw.py | 2 +- cloud/amazon/ec2_vpc_route_table.py | 31 ++++--- cloud/amazon/ec2_vpc_route_table_facts.py | 2 +- cloud/amazon/ec2_vpc_subnet.py | 7 +- cloud/amazon/ec2_vpc_subnet_facts.py | 2 +- cloud/amazon/ecs_cluster.py | 13 ++- cloud/amazon/s3_lifecycle.py | 37 +++++---- cloud/amazon/s3_logging.py | 37 +++++---- cloud/amazon/sqs_queue.py | 7 +- cloud/amazon/sts_assume_role.py | 46 +++++------ 15 files changed, 157 insertions(+), 169 deletions(-) diff --git a/cloud/amazon/dynamodb_table.py b/cloud/amazon/dynamodb_table.py index 1daf55e9d18..a39ecdd3f48 100644 --- a/cloud/amazon/dynamodb_table.py +++ b/cloud/amazon/dynamodb_table.py @@ -268,8 +268,7 @@ def main(): try: connection = connect_to_aws(boto.dynamodb2, region, **aws_connect_params) - - except (NoAuthHandlerFound, StandardError), e: + except (NoAuthHandlerFound, AnsibleAWSError), e: module.fail_json(msg=str(e)) state = module.params.get('state') diff --git a/cloud/amazon/ec2_elb_facts.py b/cloud/amazon/ec2_elb_facts.py index aaf3049bfd2..4289ef7a232 100644 --- a/cloud/amazon/ec2_elb_facts.py +++ b/cloud/amazon/ec2_elb_facts.py @@ -184,7 +184,7 @@ def main(): if region: try: connection = connect_to_aws(boto.ec2.elb, region, **aws_connect_params) - except (boto.exception.NoAuthHandlerFound, StandardError), e: + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e: module.fail_json(msg=str(e)) else: module.fail_json(msg="region must be specified") @@ -194,4 +194,5 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.ec2 import * -main() +if __name__ == '__main__': + main() diff --git a/cloud/amazon/ec2_eni.py b/cloud/amazon/ec2_eni.py index 72e5483e36b..5a6bd1f1b4d 100644 --- a/cloud/amazon/ec2_eni.py +++ b/cloud/amazon/ec2_eni.py @@ -96,25 +96,25 @@ EXAMPLES = ''' private_ip_address: 172.31.0.20 subnet_id: subnet-xxxxxxxx state: present - + # 
Destroy an ENI, detaching it from any instance if necessary - ec2_eni: eni_id: eni-xxxxxxx force_detach: yes state: absent - + # Update an ENI - ec2_eni: eni_id: eni-xxxxxxx description: "My new description" state: present - + # Detach an ENI from an instance - ec2_eni: eni_id: eni-xxxxxxx instance_id: None state: present - + ### Delete an interface on termination # First create the interface - ec2_eni: @@ -124,7 +124,7 @@ EXAMPLES = ''' subnet_id: subnet-xxxxxxxx state: present register: eni - + # Modify the interface to enable the delete_on_terminaton flag - ec2_eni: eni_id: {{ "eni.interface.id" }} @@ -145,14 +145,14 @@ except ImportError: def get_error_message(xml_string): - + root = ET.fromstring(xml_string) - for message in root.findall('.//Message'): + for message in root.findall('.//Message'): return message.text - - + + def get_eni_info(interface): - + interface_info = {'id': interface.id, 'subnet_id': interface.subnet_id, 'vpc_id': interface.vpc_id, @@ -164,7 +164,7 @@ def get_eni_info(interface): 'source_dest_check': interface.source_dest_check, 'groups': dict((group.id, group.name) for group in interface.groups), } - + if interface.attachment is not None: interface_info['attachment'] = {'attachment_id': interface.attachment.id, 'instance_id': interface.attachment.instance_id, @@ -173,11 +173,11 @@ def get_eni_info(interface): 'attach_time': interface.attachment.attach_time, 'delete_on_termination': interface.attachment.delete_on_termination, } - + return interface_info - + def wait_for_eni(eni, status): - + while True: time.sleep(3) eni.update() @@ -188,23 +188,20 @@ def wait_for_eni(eni, status): else: if status == "attached" and eni.attachment.status == "attached": break - - + + def create_eni(connection, module): - + instance_id = module.params.get("instance_id") if instance_id == 'None': instance_id = None - do_detach = True - else: - do_detach = False device_index = module.params.get("device_index") subnet_id = module.params.get('subnet_id') 
private_ip_address = module.params.get('private_ip_address') description = module.params.get('description') security_groups = module.params.get('security_groups') changed = False - + try: eni = compare_eni(connection, module) if eni is None: @@ -212,22 +209,22 @@ def create_eni(connection, module): if instance_id is not None: try: eni.attach(instance_id, device_index) - except BotoServerError as ex: + except BotoServerError: eni.delete() raise # Wait to allow creation / attachment to finish wait_for_eni(eni, "attached") eni.update() changed = True - + except BotoServerError as e: module.fail_json(msg=get_error_message(e.args[2])) - + module.exit_json(changed=changed, interface=get_eni_info(eni)) - + def modify_eni(connection, module): - + eni_id = module.params.get("eni_id") instance_id = module.params.get("instance_id") if instance_id == 'None': @@ -236,8 +233,6 @@ def modify_eni(connection, module): else: do_detach = False device_index = module.params.get("device_index") - subnet_id = module.params.get('subnet_id') - private_ip_address = module.params.get('private_ip_address') description = module.params.get('description') security_groups = module.params.get('security_groups') force_detach = module.params.get("force_detach") @@ -245,7 +240,6 @@ def modify_eni(connection, module): delete_on_termination = module.params.get("delete_on_termination") changed = False - try: # Get the eni with the eni_id specified eni_result_set = connection.get_all_network_interfaces(eni_id) @@ -282,20 +276,20 @@ def modify_eni(connection, module): except BotoServerError as e: print e module.fail_json(msg=get_error_message(e.args[2])) - + eni.update() module.exit_json(changed=changed, interface=get_eni_info(eni)) - - + + def delete_eni(connection, module): - + eni_id = module.params.get("eni_id") force_detach = module.params.get("force_detach") - + try: eni_result_set = connection.get_all_network_interfaces(eni_id) eni = eni_result_set[0] - + if force_detach is True: if eni.attachment 
is not None: eni.detach(force_detach) @@ -307,7 +301,7 @@ def delete_eni(connection, module): else: eni.delete() changed = True - + module.exit_json(changed=changed) except BotoServerError as e: msg = get_error_message(e.args[2]) @@ -316,35 +310,35 @@ def delete_eni(connection, module): module.exit_json(changed=False) else: module.fail_json(msg=get_error_message(e.args[2])) - + def compare_eni(connection, module): - + eni_id = module.params.get("eni_id") subnet_id = module.params.get('subnet_id') private_ip_address = module.params.get('private_ip_address') description = module.params.get('description') security_groups = module.params.get('security_groups') - + try: all_eni = connection.get_all_network_interfaces(eni_id) for eni in all_eni: remote_security_groups = get_sec_group_list(eni.groups) - if (eni.subnet_id == subnet_id) and (eni.private_ip_address == private_ip_address) and (eni.description == description) and (remote_security_groups == security_groups): + if (eni.subnet_id == subnet_id) and (eni.private_ip_address == private_ip_address) and (eni.description == description) and (remote_security_groups == security_groups): return eni - + except BotoServerError as e: module.fail_json(msg=get_error_message(e.args[2])) - + return None def get_sec_group_list(groups): - + # Build list of remote security groups remote_security_groups = [] for group in groups: remote_security_groups.append(group.id.encode()) - + return remote_security_groups @@ -357,7 +351,7 @@ def main(): private_ip_address = dict(), subnet_id = dict(), description = dict(), - security_groups = dict(type='list'), + security_groups = dict(type='list'), device_index = dict(default=0, type='int'), state = dict(default='present', choices=['present', 'absent']), force_detach = dict(default='no', type='bool'), @@ -365,18 +359,18 @@ def main(): delete_on_termination = dict(default=None, type='bool') ) ) - + module = AnsibleModule(argument_spec=argument_spec) if not HAS_BOTO: module.fail_json(msg='boto 
required for this module') - + region, ec2_url, aws_connect_params = get_aws_connection_info(module) - + if region: try: connection = connect_to_aws(boto.ec2, region, **aws_connect_params) - except (boto.exception.NoAuthHandlerFound, StandardError), e: + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e: module.fail_json(msg=str(e)) else: module.fail_json(msg="region must be specified") @@ -395,12 +389,13 @@ def main(): if eni_id is None: module.fail_json(msg="eni_id must be specified") else: - delete_eni(connection, module) - + delete_eni(connection, module) + from ansible.module_utils.basic import * from ansible.module_utils.ec2 import * # this is magic, see lib/ansible/module_common.py #<> -main() +if __name__ == '__main__': + main() diff --git a/cloud/amazon/ec2_eni_facts.py b/cloud/amazon/ec2_eni_facts.py index c25535f51eb..e95a6ea1029 100644 --- a/cloud/amazon/ec2_eni_facts.py +++ b/cloud/amazon/ec2_eni_facts.py @@ -113,7 +113,7 @@ def main(): if region: try: connection = connect_to_aws(boto.ec2, region, **aws_connect_params) - except (boto.exception.NoAuthHandlerFound, StandardError), e: + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e: module.fail_json(msg=str(e)) else: module.fail_json(msg="region must be specified") diff --git a/cloud/amazon/ec2_remote_facts.py b/cloud/amazon/ec2_remote_facts.py index cf54fa0274d..28fc2c97d63 100644 --- a/cloud/amazon/ec2_remote_facts.py +++ b/cloud/amazon/ec2_remote_facts.py @@ -44,12 +44,12 @@ EXAMPLES = ''' filters: instance-state-name: running "tag:Name": Example - + # Gather facts about instance i-123456 - ec2_remote_facts: filters: instance-id: i-123456 - + # Gather facts about all instances in vpc-123456 that are t2.small type - ec2_remote_facts: filters: @@ -66,23 +66,23 @@ except ImportError: HAS_BOTO = False def get_instance_info(instance): - + # Get groups groups = [] for group in instance.groups: - groups.append({ 'id': group.id, 'name': group.name }.copy()) + groups.append({ 'id': 
group.id, 'name': group.name }.copy()) # Get interfaces interfaces = [] for interface in instance.interfaces: - interfaces.append({ 'id': interface.id, 'mac_address': interface.mac_address }.copy()) + interfaces.append({ 'id': interface.id, 'mac_address': interface.mac_address }.copy()) # If an instance is terminated, sourceDestCheck is no longer returned try: - source_dest_check = instance.sourceDestCheck + source_dest_check = instance.sourceDestCheck except AttributeError: - source_dest_check = None - + source_dest_check = None + instance_info = { 'id': instance.id, 'kernel': instance.kernel, 'instance_profile': instance.instance_profile, @@ -118,23 +118,23 @@ def get_instance_info(instance): } return instance_info - + def list_ec2_instances(connection, module): - + filters = module.params.get("filters") instance_dict_array = [] - + try: all_instances = connection.get_only_instances(filters=filters) except BotoServerError as e: module.fail_json(msg=e.message) - + for instance in all_instances: instance_dict_array.append(get_instance_info(instance)) - + module.exit_json(instances=instance_dict_array) - + def main(): argument_spec = ec2_argument_spec() @@ -154,11 +154,11 @@ def main(): if region: try: connection = connect_to_aws(boto.ec2, region, **aws_connect_params) - except (boto.exception.NoAuthHandlerFound, StandardError), e: + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e: module.fail_json(msg=str(e)) else: module.fail_json(msg="region must be specified") - + list_ec2_instances(connection, module) # import module snippets @@ -167,4 +167,3 @@ from ansible.module_utils.ec2 import * if __name__ == '__main__': main() - diff --git a/cloud/amazon/ec2_vpc_igw.py b/cloud/amazon/ec2_vpc_igw.py index 16437abf073..a4e58faac8b 100644 --- a/cloud/amazon/ec2_vpc_igw.py +++ b/cloud/amazon/ec2_vpc_igw.py @@ -134,7 +134,7 @@ def main(): if region: try: connection = connect_to_aws(boto.vpc, region, **aws_connect_params) - except 
(boto.exception.NoAuthHandlerFound, StandardError), e: + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e: module.fail_json(msg=str(e)) else: module.fail_json(msg="region must be specified") diff --git a/cloud/amazon/ec2_vpc_route_table.py b/cloud/amazon/ec2_vpc_route_table.py index 829dda62d3e..eef58c23ced 100644 --- a/cloud/amazon/ec2_vpc_route_table.py +++ b/cloud/amazon/ec2_vpc_route_table.py @@ -99,7 +99,7 @@ EXAMPLES = ''' - dest: 0.0.0.0/0 instance_id: "{{ nat.instance_id }}" register: nat_route_table - + ''' @@ -253,23 +253,23 @@ def get_route_table_by_id(vpc_conn, vpc_id, route_table_id): route_tables = vpc_conn.get_all_route_tables(route_table_ids=[route_table_id], filters={'vpc_id': vpc_id}) if route_tables: route_table = route_tables[0] - + return route_table - + def get_route_table_by_tags(vpc_conn, vpc_id, tags): - + count = 0 - route_table = None + route_table = None route_tables = vpc_conn.get_all_route_tables(filters={'vpc_id': vpc_id}) for table in route_tables: this_tags = get_resource_tags(vpc_conn, table.id) if tags_match(tags, this_tags): route_table = table count +=1 - + if count > 1: raise RuntimeError("Tags provided do not identify a unique route table") - else: + else: return route_table @@ -463,7 +463,7 @@ def create_route_spec(connection, routes, vpc_id): return routes def ensure_route_table_present(connection, module): - + lookup = module.params.get('lookup') propagating_vgw_ids = module.params.get('propagating_vgw_ids', []) route_table_id = module.params.get('route_table_id') @@ -475,7 +475,7 @@ def ensure_route_table_present(connection, module): routes = create_route_spec(connection, module.params.get('routes'), vpc_id) except AnsibleIgwSearchException as e: module.fail_json(msg=e[0]) - + changed = False tags_valid = False @@ -494,7 +494,7 @@ def ensure_route_table_present(connection, module): route_table = get_route_table_by_id(connection, vpc_id, route_table_id) except EC2ResponseError as e: 
module.fail_json(msg=e.message) - + # If no route table returned then create new route table if route_table is None: try: @@ -505,7 +505,7 @@ def ensure_route_table_present(connection, module): module.exit_json(changed=True) module.fail_json(msg=e.message) - + if routes is not None: try: result = ensure_routes(connection, route_table, routes, propagating_vgw_ids, check_mode) @@ -560,18 +560,18 @@ def main(): vpc_id = dict(default=None, required=True) ) ) - + module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) - + if not HAS_BOTO: module.fail_json(msg='boto is required for this module') region, ec2_url, aws_connect_params = get_aws_connection_info(module) - + if region: try: connection = connect_to_aws(boto.vpc, region, **aws_connect_params) - except (boto.exception.NoAuthHandlerFound, StandardError), e: + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e: module.fail_json(msg=str(e)) else: module.fail_json(msg="region must be specified") @@ -598,4 +598,3 @@ from ansible.module_utils.ec2 import * # noqa if __name__ == '__main__': main() - diff --git a/cloud/amazon/ec2_vpc_route_table_facts.py b/cloud/amazon/ec2_vpc_route_table_facts.py index f93ab060fd6..8b5e60ab2c9 100644 --- a/cloud/amazon/ec2_vpc_route_table_facts.py +++ b/cloud/amazon/ec2_vpc_route_table_facts.py @@ -111,7 +111,7 @@ def main(): if region: try: connection = connect_to_aws(boto.vpc, region, **aws_connect_params) - except (boto.exception.NoAuthHandlerFound, StandardError), e: + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e: module.fail_json(msg=str(e)) else: module.fail_json(msg="region must be specified") diff --git a/cloud/amazon/ec2_vpc_subnet.py b/cloud/amazon/ec2_vpc_subnet.py index 46f78362a28..d0cc68e07fa 100644 --- a/cloud/amazon/ec2_vpc_subnet.py +++ b/cloud/amazon/ec2_vpc_subnet.py @@ -71,7 +71,7 @@ EXAMPLES = ''' state: absent vpc_id: vpc-123456 cidr: 10.0.1.16/28 - + ''' import sys # noqa @@ -143,7 +143,7 @@ def 
create_subnet(vpc_conn, vpc_id, cidr, az, check_mode): if e.error_code == "DryRunOperation": subnet = None else: - raise AnsibleVPCSubnetCreationException( + raise AnsibleVPCSubnetCreationException( 'Unable to create subnet {0}, error: {1}'.format(cidr, e)) return subnet @@ -242,7 +242,7 @@ def main(): if region: try: connection = connect_to_aws(boto.vpc, region, **aws_connect_params) - except (boto.exception.NoAuthHandlerFound, StandardError), e: + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e: module.fail_json(msg=str(e)) else: module.fail_json(msg="region must be specified") @@ -270,4 +270,3 @@ from ansible.module_utils.ec2 import * # noqa if __name__ == '__main__': main() - diff --git a/cloud/amazon/ec2_vpc_subnet_facts.py b/cloud/amazon/ec2_vpc_subnet_facts.py index 48f514ba49f..bfad2fb72a7 100644 --- a/cloud/amazon/ec2_vpc_subnet_facts.py +++ b/cloud/amazon/ec2_vpc_subnet_facts.py @@ -111,7 +111,7 @@ def main(): if region: try: connection = connect_to_aws(boto.vpc, region, **aws_connect_params) - except (boto.exception.NoAuthHandlerFound, StandardError), e: + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e: module.fail_json(msg=str(e)) else: module.fail_json(msg="region must be specified") diff --git a/cloud/amazon/ecs_cluster.py b/cloud/amazon/ecs_cluster.py index 6b37762521f..3a0d7c10636 100644 --- a/cloud/amazon/ecs_cluster.py +++ b/cloud/amazon/ecs_cluster.py @@ -25,7 +25,7 @@ description: - Creates or terminates ecs clusters. version_added: "2.0" author: Mark Chance(@Java1Guy) -requirements: [ json, time, boto, boto3 ] +requirements: [ boto, boto3 ] options: state: description: @@ -100,8 +100,9 @@ status: returned: ACTIVE type: string ''' +import time + try: - import json, time import boto HAS_BOTO = True except ImportError: @@ -147,7 +148,7 @@ class EcsClusterManager: c = self.find_in_array(response['clusters'], cluster_name) if c: return c - raise StandardError("Unknown problem describing cluster %s." 
% cluster_name) + raise Exception("Unknown problem describing cluster %s." % cluster_name) def create_cluster(self, clusterName = 'default'): response = self.ecs.create_cluster(clusterName=clusterName) @@ -170,12 +171,10 @@ def main(): module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_together=required_together) if not HAS_BOTO: - module.fail_json(msg='boto is required.') + module.fail_json(msg='boto is required.') if not HAS_BOTO3: - module.fail_json(msg='boto3 is required.') - - cluster_name = module.params['name'] + module.fail_json(msg='boto3 is required.') cluster_mgr = EcsClusterManager(module) try: diff --git a/cloud/amazon/s3_lifecycle.py b/cloud/amazon/s3_lifecycle.py index da8e8a8402f..891beac01f1 100644 --- a/cloud/amazon/s3_lifecycle.py +++ b/cloud/amazon/s3_lifecycle.py @@ -94,7 +94,7 @@ EXAMPLES = ''' prefix: /logs/ status: enabled state: present - + # Configure a lifecycle rule to transition all items with a prefix of /logs/ to glacier after 7 days and then delete after 90 days - s3_lifecycle: name: mybucket @@ -103,7 +103,7 @@ EXAMPLES = ''' prefix: /logs/ status: enabled state: present - + # Configure a lifecycle rule to transition all items with a prefix of /logs/ to glacier on 31 Dec 2020 and then delete on 31 Dec 2030. Note that midnight GMT must be specified. 
# Be sure to quote your date strings - s3_lifecycle: @@ -113,20 +113,20 @@ EXAMPLES = ''' prefix: /logs/ status: enabled state: present - + # Disable the rule created above - s3_lifecycle: name: mybucket prefix: /logs/ status: disabled state: present - + # Delete the lifecycle rule created above - s3_lifecycle: name: mybucket prefix: /logs/ state: absent - + ''' import xml.etree.ElementTree as ET @@ -182,7 +182,7 @@ def create_lifecycle_rule(connection, module): expiration_obj = Expiration(date=expiration_date) else: expiration_obj = None - + # Create transition if transition_days is not None: transition_obj = Transition(days=transition_days, storage_class=storage_class.upper()) @@ -232,7 +232,7 @@ def create_lifecycle_rule(connection, module): bucket.configure_lifecycle(lifecycle_obj) except S3ResponseError, e: module.fail_json(msg=e.message) - + module.exit_json(changed=changed) def compare_rule(rule_a, rule_b): @@ -306,7 +306,7 @@ def destroy_lifecycle_rule(connection, module): # Create lifecycle lifecycle_obj = Lifecycle() - + # Check if rule exists # If an ID exists, use that otherwise compare based on prefix if rule_id is not None: @@ -323,8 +323,7 @@ def destroy_lifecycle_rule(connection, module): changed = True else: lifecycle_obj.append(existing_rule) - - + # Write lifecycle to bucket or, if there no rules left, delete lifecycle configuration try: if lifecycle_obj: @@ -333,9 +332,9 @@ def destroy_lifecycle_rule(connection, module): bucket.delete_lifecycle_configuration() except BotoServerError, e: module.fail_json(msg=e.message) - + module.exit_json(changed=changed) - + def main(): @@ -361,18 +360,18 @@ def main(): [ 'expiration_days', 'expiration_date' ], [ 'expiration_days', 'transition_date' ], [ 'transition_days', 'transition_date' ], - [ 'transition_days', 'expiration_date' ] + [ 'transition_days', 'expiration_date' ] ] ) if not HAS_BOTO: module.fail_json(msg='boto required for this module') - + if not HAS_DATEUTIL: - module.fail_json(msg='dateutil 
required for this module') + module.fail_json(msg='dateutil required for this module') region, ec2_url, aws_connect_params = get_aws_connection_info(module) - + if region in ('us-east-1', '', None): # S3ism for the US Standard region location = Location.DEFAULT @@ -385,7 +384,7 @@ def main(): # use this as fallback because connect_to_region seems to fail in boto + non 'classic' aws accounts in some cases if connection is None: connection = boto.connect_s3(**aws_connect_params) - except (boto.exception.NoAuthHandlerFound, StandardError), e: + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e: module.fail_json(msg=str(e)) expiration_date = module.params.get("expiration_date") @@ -398,13 +397,13 @@ def main(): datetime.datetime.strptime(expiration_date, "%Y-%m-%dT%H:%M:%S.000Z") except ValueError, e: module.fail_json(msg="expiration_date is not a valid ISO-8601 format. The time must be midnight and a timezone of GMT must be included") - + if transition_date is not None: try: datetime.datetime.strptime(transition_date, "%Y-%m-%dT%H:%M:%S.000Z") except ValueError, e: module.fail_json(msg="expiration_date is not a valid ISO-8601 format. 
The time must be midnight and a timezone of GMT must be included") - + if state == 'present': create_lifecycle_rule(connection, module) elif state == 'absent': diff --git a/cloud/amazon/s3_logging.py b/cloud/amazon/s3_logging.py index 8047a5083bc..dca2a28aca0 100644 --- a/cloud/amazon/s3_logging.py +++ b/cloud/amazon/s3_logging.py @@ -61,7 +61,7 @@ EXAMPLES = ''' s3_logging: name: mywebsite.com state: absent - + ''' try: @@ -74,21 +74,21 @@ except ImportError: def compare_bucket_logging(bucket, target_bucket, target_prefix): - + bucket_log_obj = bucket.get_logging_status() if bucket_log_obj.target != target_bucket or bucket_log_obj.prefix != target_prefix: return False else: return True - + def enable_bucket_logging(connection, module): - + bucket_name = module.params.get("name") target_bucket = module.params.get("target_bucket") target_prefix = module.params.get("target_prefix") changed = False - + try: bucket = connection.get_bucket(bucket_name) except S3ResponseError as e: @@ -111,15 +111,15 @@ def enable_bucket_logging(connection, module): except S3ResponseError as e: module.fail_json(msg=e.message) - + module.exit_json(changed=changed) - - + + def disable_bucket_logging(connection, module): - + bucket_name = module.params.get("name") changed = False - + try: bucket = connection.get_bucket(bucket_name) if not compare_bucket_logging(bucket, None, None): @@ -127,12 +127,12 @@ def disable_bucket_logging(connection, module): changed = True except S3ResponseError as e: module.fail_json(msg=e.message) - + module.exit_json(changed=changed) - - + + def main(): - + argument_spec = ec2_argument_spec() argument_spec.update( dict( @@ -142,16 +142,16 @@ def main(): state = dict(required=False, default='present', choices=['present', 'absent']) ) ) - + module = AnsibleModule(argument_spec=argument_spec) if not HAS_BOTO: module.fail_json(msg='boto required for this module') - + region, ec2_url, aws_connect_params = get_aws_connection_info(module) if region in ('us-east-1', '', 
None): - # S3ism for the US Standard region + # S3ism for the US Standard region location = Location.DEFAULT else: # Boto uses symbolic names for locations but region strings will @@ -162,10 +162,9 @@ def main(): # use this as fallback because connect_to_region seems to fail in boto + non 'classic' aws accounts in some cases if connection is None: connection = boto.connect_s3(**aws_connect_params) - except (boto.exception.NoAuthHandlerFound, StandardError), e: + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e: module.fail_json(msg=str(e)) - state = module.params.get("state") if state == 'present': diff --git a/cloud/amazon/sqs_queue.py b/cloud/amazon/sqs_queue.py index 0d098c6df52..de0ca7ebff1 100644 --- a/cloud/amazon/sqs_queue.py +++ b/cloud/amazon/sqs_queue.py @@ -215,8 +215,8 @@ def main(): try: connection = connect_to_aws(boto.sqs, region, **aws_connect_params) - - except (NoAuthHandlerFound, StandardError), e: + + except (NoAuthHandlerFound, AnsibleAWSError), e: module.fail_json(msg=str(e)) state = module.params.get('state') @@ -230,4 +230,5 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.ec2 import * -main() +if __name__ == '__main__': + main() diff --git a/cloud/amazon/sts_assume_role.py b/cloud/amazon/sts_assume_role.py index b089550adab..b5f2c810351 100644 --- a/cloud/amazon/sts_assume_role.py +++ b/cloud/amazon/sts_assume_role.py @@ -16,7 +16,7 @@ DOCUMENTATION = ''' --- -module: sts_assume_role +module: sts_assume_role short_description: Assume a role using AWS Security Token Service and obtain temporary credentials description: - Assume a role using AWS Security Token Service and obtain temporary credentials @@ -25,7 +25,7 @@ author: Boris Ekelchik (@bekelchik) options: role_arn: description: - - The Amazon Resource Name (ARN) of the role that the caller is assuming (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html#Identifiers_ARNs) + - The Amazon Resource Name (ARN) of the role 
that the caller is assuming (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html#Identifiers_ARNs) required: true role_session_name: description: @@ -33,27 +33,27 @@ options: required: true policy: description: - - Supplemental policy to use in addition to assumed role's policies. + - Supplemental policy to use in addition to assumed role's policies. required: false default: null duration_seconds: description: - - The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set to 3600 seconds. + - The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set to 3600 seconds. required: false default: null external_id: description: - - A unique identifier that is used by third parties to assume a role in their customers' accounts. + - A unique identifier that is used by third parties to assume a role in their customers' accounts. required: false default: null mfa_serial_number: description: - - he identification number of the MFA device that is associated with the user who is making the AssumeRole call. + - he identification number of the MFA device that is associated with the user who is making the AssumeRole call. required: false default: null mfa_token: description: - - The value provided by the MFA device, if the trust policy of the role being assumed requires MFA. + - The value provided by the MFA device, if the trust policy of the role being assumed requires MFA. required: false default: null notes: @@ -67,12 +67,12 @@ EXAMPLES = ''' # Note: These examples do not set authentication details, see the AWS Guide for details. 
# Assume an existing role (more details: http://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html) -sts_assume_role: +sts_assume_role: role_arn: "arn:aws:iam::123456789012:role/someRole" session_name: "someRoleSession" register: assumed_role -# Use the assumed role above to tag an instance in account 123456789012 +# Use the assumed role above to tag an instance in account 123456789012 ec2_tag: aws_access_key: "{{ assumed_role.sts_creds.access_key }}" aws_secret_key: "{{ assumed_role.sts_creds.secret_key }}" @@ -84,19 +84,16 @@ ec2_tag: ''' -import sys -import time - try: import boto.sts from boto.exception import BotoServerError HAS_BOTO = True except ImportError: HAS_BOTO = False - + def assume_role_policy(connection, module): - + role_arn = module.params.get('role_arn') role_session_name = module.params.get('role_session_name') policy = module.params.get('policy') @@ -105,13 +102,13 @@ def assume_role_policy(connection, module): mfa_serial_number = module.params.get('mfa_serial_number') mfa_token = module.params.get('mfa_token') changed = False - + try: assumed_role = connection.assume_role(role_arn, role_session_name, policy, duration_seconds, external_id, mfa_serial_number, mfa_token) - changed = True + changed = True except BotoServerError, e: module.fail_json(msg=e) - + module.exit_json(changed=changed, sts_creds=assumed_role.credentials.__dict__, sts_user=assumed_role.user.__dict__) def main(): @@ -127,18 +124,18 @@ def main(): mfa_token = dict(required=False, default=None) ) ) - + module = AnsibleModule(argument_spec=argument_spec) if not HAS_BOTO: module.fail_json(msg='boto required for this module') - + region, ec2_url, aws_connect_params = get_aws_connection_info(module) - + if region: try: connection = connect_to_aws(boto.sts, region, **aws_connect_params) - except (boto.exception.NoAuthHandlerFound, StandardError), e: + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e: module.fail_json(msg=str(e)) else: 
module.fail_json(msg="region must be specified") @@ -147,10 +144,11 @@ def main(): assume_role_policy(connection, module) except BotoServerError, e: module.fail_json(msg=e) - - + + # import module snippets from ansible.module_utils.basic import * from ansible.module_utils.ec2 import * -main() +if __name__ == '__main__': + main() From fba8c9f8a7704874f33b260f9d92c942fadb99c4 Mon Sep 17 00:00:00 2001 From: Michael Weber Date: Tue, 1 Dec 2015 01:56:23 -0800 Subject: [PATCH 61/64] Fix error 'fail_json() takes exactly 1 argument' Fixes bug #1257 --- monitoring/nagios.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/monitoring/nagios.py b/monitoring/nagios.py index ee67a3ae20b..b55a374b34f 100644 --- a/monitoring/nagios.py +++ b/monitoring/nagios.py @@ -266,7 +266,7 @@ def main(): module.fail_json(msg='no command passed for command action') ################################################################## if not cmdfile: - module.fail_json('unable to locate nagios.cfg') + module.fail_json(msg='unable to locate nagios.cfg') ################################################################## ansible_nagios = Nagios(module, **module.params) From 32658b9d3b4d0a58459c2fa83a7594bf98676acd Mon Sep 17 00:00:00 2001 From: Guillaume Grossetie Date: Tue, 1 Dec 2015 14:22:25 +0100 Subject: [PATCH 62/64] Resolves #1312, Improve pam_limits documentation Adds comment parameter and improve examples. --- system/pam_limits.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/system/pam_limits.py b/system/pam_limits.py index 4003f76d3f8..e14408fb4e2 100644 --- a/system/pam_limits.py +++ b/system/pam_limits.py @@ -78,14 +78,22 @@ options: - Modify the limits.conf path. required: false default: "/etc/security/limits.conf" + comment: + description: + - Comment associated with the limit. 
+ required: false + default: '' ''' EXAMPLES = ''' -# Add or modify limits for the user joe +# Add or modify nofile soft limit for the user joe - pam_limits: domain=joe limit_type=soft limit_item=nofile value=64000 -# Add or modify limits for the user joe. Keep or set the maximal value -- pam_limits: domain=joe limit_type=soft limit_item=nofile value=1000000 +# Add or modify fsize hard limit for the user smith. Keep or set the maximal value. +- pam_limits: domain=smith limit_type=hard limit_item=fsize value=1000000 use_max=yes + +# Add or modify memlock, both soft and hard, limit for the user james with a comment. +- pam_limits: domain=james limit_type=- limit_item=memlock value=unlimited comment="unlimited memory lock for james" ''' def main(): From 8d866669bbac18a350a2c8616ac1efa94358d68e Mon Sep 17 00:00:00 2001 From: Josh Gachnang Date: Tue, 1 Dec 2015 11:16:29 -0600 Subject: [PATCH 63/64] Fix mongodb_user docs typo Bob's last name is Belcher: http://bobs-burgers.wikia.com/wiki/Bob_Belcher. 
These docs made me chuckle, so thanks :) --- database/misc/mongodb_user.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/database/misc/mongodb_user.py b/database/misc/mongodb_user.py index c18ad6004f5..12d348e9a92 100644 --- a/database/misc/mongodb_user.py +++ b/database/misc/mongodb_user.py @@ -124,7 +124,7 @@ EXAMPLES = ''' - mongodb_user: database=burgers name=joe password=12345 roles='readWriteAnyDatabase' state=present # add a user to database in a replica set, the primary server is automatically discovered and written to -- mongodb_user: database=burgers name=bob replica_set=blecher password=12345 roles='readWriteAnyDatabase' state=present +- mongodb_user: database=burgers name=bob replica_set=belcher password=12345 roles='readWriteAnyDatabase' state=present ''' import ConfigParser From 037ff890639ff5387a170398a4b2f0842b8d55d3 Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Sun, 29 Nov 2015 23:48:50 +0100 Subject: [PATCH 64/64] Add a more explicit error message, fix #1282 --- system/firewalld.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/system/firewalld.py b/system/firewalld.py index 47d98544000..4a2e7644bf1 100644 --- a/system/firewalld.py +++ b/system/firewalld.py @@ -75,6 +75,7 @@ options: default: 0 notes: - Not tested on any Debian based system. + - Requires the python2 bindings of firewalld, who may not be installed by default if the distribution switched to python 3 requirements: [ 'firewalld >= 0.2.11' ] author: "Adam Miller (@maxamillion)" ''' @@ -251,7 +252,7 @@ def main(): module.fail(msg='permanent is a required parameter') if not HAS_FIREWALLD: - module.fail_json(msg='firewalld required for this module') + module.fail_json(msg='firewalld and its python 2 module are required for this module') ## Pre-run version checking if FW_VERSION < "0.2.11":