From 9ca27eedccfaf01f1f348362fd973bfe93c7a540 Mon Sep 17 00:00:00 2001 From: Pierre-Louis Bonicoli Date: Sun, 16 Nov 2014 01:13:29 +0100 Subject: [PATCH 001/464] apt_repository: fix file mode 'set_mode_if_different' method should be called on dest filename and after 'atomic_move' method --- packaging/os/apt_repository.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/packaging/os/apt_repository.py b/packaging/os/apt_repository.py index 2ee5819fc4e..5f252371e4a 100644 --- a/packaging/os/apt_repository.py +++ b/packaging/os/apt_repository.py @@ -238,10 +238,6 @@ class SourcesList(object): d, fn = os.path.split(filename) fd, tmp_path = tempfile.mkstemp(prefix=".%s-" % fn, dir=d) - # allow the user to override the default mode - this_mode = module.params['mode'] - module.set_mode_if_different(tmp_path, this_mode, False) - f = os.fdopen(fd, 'w') for n, valid, enabled, source, comment in sources: chunks = [] @@ -259,6 +255,10 @@ class SourcesList(object): except IOError, err: module.fail_json(msg="Failed to write to file %s: %s" % (tmp_path, unicode(err))) module.atomic_move(tmp_path, filename) + + # allow the user to override the default mode + this_mode = module.params['mode'] + module.set_mode_if_different(filename, this_mode, False) else: del self.files[filename] if os.path.exists(filename): From 16de3281c378fecb0b62bf6e84dfc4267140e9f9 Mon Sep 17 00:00:00 2001 From: Bret Martin Date: Wed, 19 Nov 2014 10:40:44 -0500 Subject: [PATCH 002/464] ec2_vpc module: add route table resource tagging --- cloud/amazon/ec2_vpc.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_vpc.py b/cloud/amazon/ec2_vpc.py index e4dc9a65f7d..40305aa8408 100644 --- a/cloud/amazon/ec2_vpc.py +++ b/cloud/amazon/ec2_vpc.py @@ -72,7 +72,7 @@ options: aliases: [] route_tables: description: - - 'A dictionary array of route tables to add of the form: { subnets: [172.22.2.0/24, 172.22.3.0/24,], routes: [{ dest: 0.0.0.0/0, gw: igw},] }. 
Where the subnets list is those subnets the route table should be associated with, and the routes list is a list of routes to be in the table. The special keyword for the gw of igw specifies that you should the route should go through the internet gateway attached to the VPC. gw also accepts instance-ids in addition igw. This module is currently unable to affect the "main" route table due to some limitations in boto, so you must explicitly define the associated subnets or they will be attached to the main table implicitly. As of 1.8, if the route_tables parameter is not specified, no existing routes will be modified.' + - 'A dictionary array of route tables to add of the form: { subnets: [172.22.2.0/24, 172.22.3.0/24,], routes: [{ dest: 0.0.0.0/0, gw: igw},], resource_tags: ... }. Where the subnets list is those subnets the route table should be associated with, and the routes list is a list of routes to be in the table. The special keyword for the gw of igw specifies that you should the route should go through the internet gateway attached to the VPC. gw also accepts instance-ids in addition igw. resource_tags is optional and uses dictionary form: { "Name": "public", ... }. This module is currently unable to affect the "main" route table due to some limitations in boto, so you must explicitly define the associated subnets or they will be attached to the main table implicitly. As of 1.8, if the route_tables parameter is not specified, no existing routes will be modified.' 
required: false default: null aliases: [] @@ -422,6 +422,9 @@ def create_vpc(module, vpc_conn): for rt in route_tables: try: new_rt = vpc_conn.create_route_table(vpc.id) + new_rt_tags = rt.get('resource_tags', None) + if new_rt_tags: + vpc_conn.create_tags(new_rt.id, new_rt_tags) for route in rt['routes']: route_kwargs = {} if route['gw'] == 'igw': From d7db09a0eb9d02adaabb0af7e167b9bdb9354644 Mon Sep 17 00:00:00 2001 From: Lorin Hochstein Date: Sat, 22 Nov 2014 20:59:36 -0500 Subject: [PATCH 003/464] django_manage: expand ~ in app_path parameter Allow users to specify app_path parameters that contain ~, for example: app_path=~/myproject --- web_infrastructure/django_manage.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web_infrastructure/django_manage.py b/web_infrastructure/django_manage.py index 580cc63c2dd..424bf6821e4 100644 --- a/web_infrastructure/django_manage.py +++ b/web_infrastructure/django_manage.py @@ -218,7 +218,7 @@ def main(): ) command = module.params['command'] - app_path = module.params['app_path'] + app_path = os.path.expanduser(module.params['app_path']) virtualenv = module.params['virtualenv'] for param in specific_params: From 102167f22ecc8dd2dd4c0fed919f02579de18f17 Mon Sep 17 00:00:00 2001 From: Jim Patterson Date: Sun, 30 Nov 2014 19:31:09 -0500 Subject: [PATCH 004/464] Correct check mode for pip in virtualenv. Fix #412. Check mode was always returning changed=True for pip when the target was in a virtualenv. The code now uses the normal tests for determining if change status. 
--- packaging/language/pip.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/language/pip.py b/packaging/language/pip.py index 17f52c00398..3ba93185a31 100644 --- a/packaging/language/pip.py +++ b/packaging/language/pip.py @@ -314,7 +314,7 @@ def main(): this_dir = os.path.join(this_dir, chdir) if module.check_mode: - if env or extra_args or requirements or state == 'latest' or not name: + if extra_args or requirements or state == 'latest' or not name: module.exit_json(changed=True) elif name.startswith('svn+') or name.startswith('git+') or \ name.startswith('hg+') or name.startswith('bzr+'): From cda40bc33c0da4444bd83ba527b198545ff99085 Mon Sep 17 00:00:00 2001 From: Sebastian Gerhards Date: Tue, 2 Dec 2014 11:33:10 +0100 Subject: [PATCH 005/464] rhn_register: add support for profilename --- packaging/os/rhn_register.py | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/packaging/os/rhn_register.py b/packaging/os/rhn_register.py index 1e92405c827..4207acc8c28 100644 --- a/packaging/os/rhn_register.py +++ b/packaging/os/rhn_register.py @@ -56,6 +56,11 @@ options: - supply an activation key for use with registration required: False default: null + profilename: + description: + - supply an profilename for use with registration + required: False + default: null channels: description: - Optionally specify a list of comma-separated channels to subscribe to upon successful registration. @@ -73,6 +78,9 @@ EXAMPLES = ''' # Register with activationkey (1-222333444) and enable extended update support. - rhn_register: state=present activationkey=1-222333444 enable_eus=true +# Register with activationkey (1-222333444) and set a profilename which may differ from the hostname. +- rhn_register: state=present activationkey=1-222333444 profilename=host.example.com.custom + # Register as user (joe_user) with password (somepass) against a satellite # server specified by (server_url). 
- rhn_register: > @@ -209,7 +217,7 @@ class Rhn(RegistrationBase): self.update_plugin_conf('rhnplugin', True) self.update_plugin_conf('subscription-manager', False) - def register(self, enable_eus=False, activationkey=None): + def register(self, enable_eus=False, activationkey=None, profilename=None): ''' Register system to RHN. If enable_eus=True, extended update support will be requested. @@ -221,7 +229,8 @@ class Rhn(RegistrationBase): register_cmd += " --use-eus-channel" if activationkey is not None: register_cmd += " --activationkey '%s'" % activationkey - # FIXME - support --profilename + if profilename is not None: + register_cmd += " --profilename '%s'" % profilename # FIXME - support --systemorgid rc, stdout, stderr = self.module.run_command(register_cmd, check_rc=True, use_unsafe_shell=True) @@ -285,6 +294,7 @@ def main(): password = dict(default=None, required=False), server_url = dict(default=rhn.config.get_option('serverURL'), required=False), activationkey = dict(default=None, required=False), + profilename = dict(default=None, required=False), enable_eus = dict(default=False, type='bool'), channels = dict(default=[], type='list'), ) @@ -295,6 +305,7 @@ def main(): rhn.password = module.params['password'] rhn.configure(module.params['server_url']) activationkey = module.params['activationkey'] + profilename = module.params['profilename'] channels = module.params['channels'] rhn.module = module From 18183caf8616967e2a6ee6f10ca679b364a2f6ea Mon Sep 17 00:00:00 2001 From: Alex King Date: Mon, 8 Dec 2014 00:01:55 +1300 Subject: [PATCH 006/464] Extend hashes that can be specified by crypt_scheme beyond those understood by Apache/Nginx. 
--- web_infrastructure/htpasswd.py | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/web_infrastructure/htpasswd.py b/web_infrastructure/htpasswd.py index 4a72ea37fec..e263f842fa0 100644 --- a/web_infrastructure/htpasswd.py +++ b/web_infrastructure/htpasswd.py @@ -46,7 +46,10 @@ options: choices: ["apr_md5_crypt", "des_crypt", "ldap_sha1", "plaintext"] default: "apr_md5_crypt" description: - - Encryption scheme to be used. + - Encryption scheme to be used. As well as the four choices listed + here, you can also use any other hash supported by passlib, such as + md5_crypt and sha256_crypt, which are linux passwd hashes. If you + do so the password file will not be compatible with Apache or Nginx state: required: false choices: [ present, absent ] @@ -74,6 +77,8 @@ EXAMPLES = """ - htpasswd: path=/etc/nginx/passwdfile name=janedoe password=9s36?;fyNp owner=root group=www-data mode=0640 # Remove a user from a password file - htpasswd: path=/etc/apache2/passwdfile name=foobar state=absent +# Add a user to a password file suitable for use by libpam-pwdfile +- htpasswd: path=/etc/mail/passwords name=alex password=oedu2eGh crypt_scheme=md5_crypt """ @@ -81,13 +86,15 @@ import os from distutils.version import StrictVersion try: - from passlib.apache import HtpasswdFile + from passlib.apache import HtpasswdFile, htpasswd_context + from passlib.context import CryptContext import passlib except ImportError: passlib_installed = False else: passlib_installed = True +apache_hashes = ["apr_md5_crypt", "des_crypt", "ldap_sha1", "plaintext"] def create_missing_directories(dest): destpath = os.path.dirname(dest) @@ -99,6 +106,10 @@ def present(dest, username, password, crypt_scheme, create, check_mode): """ Ensures user is present Returns (msg, changed) """ + if crypt_scheme in apache_hashes: + context = htpasswd_context + else: + context = CryptContext(schemes = [ crypt_scheme ] + apache_hashes) if not os.path.exists(dest): if not create: 
raise ValueError('Destination %s does not exist' % dest) @@ -106,9 +117,9 @@ def present(dest, username, password, crypt_scheme, create, check_mode): return ("Create %s" % dest, True) create_missing_directories(dest) if StrictVersion(passlib.__version__) >= StrictVersion('1.6'): - ht = HtpasswdFile(dest, new=True, default_scheme=crypt_scheme) + ht = HtpasswdFile(dest, new=True, default_scheme=crypt_scheme, context=context) else: - ht = HtpasswdFile(dest, autoload=False, default=crypt_scheme) + ht = HtpasswdFile(dest, autoload=False, default=crypt_scheme, context=context) if getattr(ht, 'set_password', None): ht.set_password(username, password) else: @@ -117,9 +128,9 @@ def present(dest, username, password, crypt_scheme, create, check_mode): return ("Created %s and added %s" % (dest, username), True) else: if StrictVersion(passlib.__version__) >= StrictVersion('1.6'): - ht = HtpasswdFile(dest, new=False, default_scheme=crypt_scheme) + ht = HtpasswdFile(dest, new=False, default_scheme=crypt_scheme, context=context) else: - ht = HtpasswdFile(dest, default=crypt_scheme) + ht = HtpasswdFile(dest, default=crypt_scheme, context=context) found = None if getattr(ht, 'check_password', None): From 3fca5e587694989cf74808d49341b83b487a782b Mon Sep 17 00:00:00 2001 From: "Federico G. Schwindt" Date: Sun, 14 Dec 2014 22:53:21 +0000 Subject: [PATCH 007/464] Allow globbing in creates= and removes= directives Fixes 1904 --- commands/command.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/commands/command.py b/commands/command.py index c584d6feed8..bc286d6855d 100644 --- a/commands/command.py +++ b/commands/command.py @@ -20,6 +20,7 @@ import sys import datetime +import glob import traceback import re import shlex @@ -161,7 +162,7 @@ def main(): # and the filename already exists. This allows idempotence # of command executions. 
v = os.path.expanduser(creates) - if os.path.exists(v): + if glob.glob(v): module.exit_json( cmd=args, stdout="skipped, since %s exists" % v, @@ -175,7 +176,7 @@ def main(): # and the filename does not exist. This allows idempotence # of command executions. v = os.path.expanduser(removes) - if not os.path.exists(v): + if not glob.glob(v): module.exit_json( cmd=args, stdout="skipped, since %s does not exist" % v, From 8f3b5c640b98ba9473a0df7ddc650539a3efc048 Mon Sep 17 00:00:00 2001 From: Stefan Junker Date: Sun, 21 Dec 2014 16:10:39 +0100 Subject: [PATCH 008/464] Fix bind-volumes on docker >= 1.4.0 If bind-volumes are submitted to docker >= 1.4.0 with the volumes set in addition to the binds, docker will create a regular volume and not bind-mount the specified path. --- cloud/docker/docker.py | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index f53819f2679..bbcb73df99b 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -484,20 +484,22 @@ class DockerManager(object): vols = self.module.params.get('volumes') for vol in vols: parts = vol.split(":") + # regular volume + if len(parts) == 1: + self.volumes[parts[0]] = {} # host mount (e.g. /mnt:/tmp, bind mounts host's /tmp to /mnt in the container) - if len(parts) == 2: - self.volumes[parts[1]] = {} - self.binds[parts[0]] = parts[1] - # with bind mode - elif len(parts) == 3: - if parts[2] not in ['ro', 'rw']: - self.module.fail_json(msg='bind mode needs to either be "ro" or "rw"') - ro = parts[2] == 'ro' - self.volumes[parts[1]] = {} - self.binds[parts[0]] = {'bind': parts[1], 'ro': ro} - # docker mount (e.g. 
/www, mounts a docker volume /www on the container at the same location) + elif 2 <= len(parts) <= 3: + # default to read-write + ro = False + # with supplied bind mode + if len(parts) == 3: + if parts[2] not in ['ro', 'rw']: + self.module.fail_json(msg='bind mode needs to either be "ro" or "rw"') + else: + ro = parts[2] == 'ro' + self.binds[parts[0]] = {'bind': parts[1], 'ro': ro } else: - self.volumes[parts[0]] = {} + self.module.fail_json(msg='volumes support 1 to 3 arguments') self.lxc_conf = None if self.module.params.get('lxc_conf'): From 00b4f4d543512c0da9b80917988c9d1abab5515a Mon Sep 17 00:00:00 2001 From: sysadmin75 Date: Thu, 25 Dec 2014 17:36:51 -0500 Subject: [PATCH 009/464] Fix to revoke privileges for mysql user = '' Issue #9848 --- database/mysql/mysql_user.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/database/mysql/mysql_user.py b/database/mysql/mysql_user.py index e160fcb68f6..f4b620c0e9a 100644 --- a/database/mysql/mysql_user.py +++ b/database/mysql/mysql_user.py @@ -254,7 +254,7 @@ def privileges_get(cursor, user,host): return x for grant in grants: - res = re.match("GRANT (.+) ON (.+) TO '.+'@'.+'( IDENTIFIED BY PASSWORD '.+')? ?(.*)", grant[0]) + res = re.match("GRANT (.+) ON (.+) TO '.*'@'.+'( IDENTIFIED BY PASSWORD '.+')? ?(.*)", grant[0]) if res is None: raise InvalidPrivsError('unable to parse the MySQL grant string: %s' % grant[0]) privileges = res.group(1).split(", ") From d9360a7613318a593d4ed5688269979dc60c7d72 Mon Sep 17 00:00:00 2001 From: Chris Church Date: Fri, 26 Dec 2014 01:29:54 -0500 Subject: [PATCH 010/464] Update docs, add example of using django_manage to run other commands. 
--- web_infrastructure/django_manage.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/web_infrastructure/django_manage.py b/web_infrastructure/django_manage.py index 3e34a6388c0..d71001fd8c2 100644 --- a/web_infrastructure/django_manage.py +++ b/web_infrastructure/django_manage.py @@ -30,7 +30,8 @@ options: command: choices: [ 'cleanup', 'collectstatic', 'flush', 'loaddata', 'migrate', 'runfcgi', 'syncdb', 'test', 'validate', ] description: - - The name of the Django management command to run. Built in commands are cleanup, collectstatic, flush, loaddata, migrate, runfcgi, syncdb, test, and validate. Other commands can be entered, but will fail if they're unknown to Django. + - The name of the Django management command to run. Built in commands are cleanup, collectstatic, flush, loaddata, migrate, runfcgi, syncdb, test, and validate. + - Other commands can be entered, but will fail if they're unknown to Django. Other commands that may prompt for user input should be run with the I(--noinput) flag. required: true app_path: description: @@ -102,7 +103,7 @@ EXAMPLES = """ # Load the initial_data fixture into the application - django_manage: command=loaddata app_path={{ django_dir }} fixtures={{ initial_data }} -#Run syncdb on the application +# Run syncdb on the application - django_manage: > command=syncdb app_path={{ django_dir }} @@ -110,8 +111,11 @@ EXAMPLES = """ pythonpath={{ settings_dir }} virtualenv={{ virtualenv_dir }} -#Run the SmokeTest test case from the main app. Useful for testing deploys. -- django_manage: command=test app_path=django_dir apps=main.SmokeTest +# Run the SmokeTest test case from the main app. Useful for testing deploys. +- django_manage: command=test app_path={{ django_dir }} apps=main.SmokeTest + +# Create an initial superuser. 
+- django_manage: command="createsuperuser --noinput --username=admin --email=admin@example.com" app_path={{ django_dir }} """ From aa99eade7e45b6995c5cbc364cb45ff9cdbe2598 Mon Sep 17 00:00:00 2001 From: sysadmin75 Date: Sat, 27 Dec 2014 20:08:25 -0500 Subject: [PATCH 011/464] ansible-modules-core #530 fix - Mount module does not accept spaces in mount point path --- system/mount.py | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/system/mount.py b/system/mount.py index 9dc6fbe7b8c..0d78d6791f1 100644 --- a/system/mount.py +++ b/system/mount.py @@ -114,6 +114,11 @@ def set_mount(**kwargs): ) args.update(kwargs) + # save the mount name before space replacement + origname = args['name'] + # replace any space in mount name with '\040' to make it fstab compatible (man fstab) + args['name'] = args['name'].replace(' ', r'\040') + new_line = '%(src)s %(name)s %(fstype)s %(opts)s %(dump)s %(passno)s\n' to_write = [] @@ -158,7 +163,8 @@ def set_mount(**kwargs): if changed: write_fstab(to_write, args['fstab']) - return (args['name'], changed) + # mount function needs origname + return (origname, changed) def unset_mount(**kwargs): @@ -173,6 +179,11 @@ def unset_mount(**kwargs): ) args.update(kwargs) + # save the mount name before space replacement + origname = args['name'] + # replace any space in mount name with '\040' to make it fstab compatible (man fstab) + args['name'] = args['name'].replace(' ', r'\040') + to_write = [] changed = False for line in open(args['fstab'], 'r').readlines(): @@ -201,7 +212,8 @@ def unset_mount(**kwargs): if changed: write_fstab(to_write, args['fstab']) - return (args['name'], changed) + # umount needs origname + return (origname, changed) def mount(module, **kwargs): From c5fe40661d26094b93c40ca3fc99e0d9d72840b0 Mon Sep 17 00:00:00 2001 From: tedder Date: Mon, 29 Dec 2014 16:38:08 -0800 Subject: [PATCH 012/464] feature pull request: catch and retry recoverable errors boto can throw SSLError when timeouts 
occur (among other SSL errors). Catch these so proper JSON can be returned, and also add the ability to retry the operation. There's an open issue in boto for this: https://github.com/boto/boto/issues/2409 Here's a sample stacktrace that inspired me to work on this. I'm on 1.7, but there's no meaningful differences in the 1.8 release that would affect this. I've added line breaks to the trace for readability. failed to parse: Traceback (most recent call last): File "/home/ubuntu/.ansible/tmp/ansible-tmp-1419895753.17-160808281985012/s3", line 2031, in main() File "/home/ubuntu/.ansible/tmp/ansible-tmp-1419895753.17-160808281985012/s3", line 353, in main download_s3file(module, s3, bucket, obj, dest) File "/home/ubuntu/.ansible/tmp/ansible-tmp-1419895753.17-160808281985012/s3", line 234, in download_s3file key.get_contents_to_filename(dest) File "/usr/local/lib/python2.7/dist-packages/boto/s3/key.py", line 1665, in get_contents_to_filename response_headers=response_headers) File "/usr/local/lib/python2.7/dist-packages/boto/s3/key.py", line 1603, in get_contents_to_file response_headers=response_headers) File "/usr/local/lib/python2.7/dist-packages/boto/s3/key.py", line 1435, in get_file query_args=None) File "/usr/local/lib/python2.7/dist-packages/boto/s3/key.py", line 1488, in _get_file_internal for bytes in self: File "/usr/local/lib/python2.7/dist-packages/boto/s3/key.py", line 368, in next data = self.resp.read(self.BufferSize) File "/usr/local/lib/python2.7/dist-packages/boto/connection.py", line 416, in read return httplib.HTTPResponse.read(self, amt) File "/usr/lib/python2.7/httplib.py", line 567, in read s = self.fp.read(amt) File "/usr/lib/python2.7/socket.py", line 380, in read data = self._sock.recv(left) File "/usr/lib/python2.7/ssl.py", line 341, in recv return self.read(buflen) File "/usr/lib/python2.7/ssl.py", line 260, in read return self._sslobj.read(len) ssl.SSLError: The read operation timed out --- cloud/amazon/s3.py | 45 
++++++++++++++++++++++++++++++++------------- 1 file changed, 32 insertions(+), 13 deletions(-) diff --git a/cloud/amazon/s3.py b/cloud/amazon/s3.py index 6438c6405e7..48b4eb73890 100644 --- a/cloud/amazon/s3.py +++ b/cloud/amazon/s3.py @@ -95,6 +95,13 @@ options: required: false default: null version_added: "1.8" + retries: + description: + - On recoverable failure, how many times to retry before actually failing. + required: false + default: 0 + version_added: "1.9" + requirements: [ "boto" ] author: Lester Wade, Ralph Tice @@ -133,6 +140,7 @@ import sys import os import urlparse import hashlib +from ssl import SSLError try: import boto @@ -237,14 +245,23 @@ def upload_s3file(module, s3, bucket, obj, src, expiry, metadata): except s3.provider.storage_copy_error, e: module.fail_json(msg= str(e)) -def download_s3file(module, s3, bucket, obj, dest): - try: - bucket = s3.lookup(bucket) - key = bucket.lookup(obj) - key.get_contents_to_filename(dest) - module.exit_json(msg="GET operation complete", changed=True) - except s3.provider.storage_copy_error, e: - module.fail_json(msg= str(e)) +def download_s3file(module, s3, bucket, obj, dest, retries): + # retries is the number of loops; range/xrange needs to be one + # more to get that count of loops. + bucket = s3.lookup(bucket) + key = bucket.lookup(obj) + for x in xrange(0, retries + 1): + try: + key.get_contents_to_filename(dest) + module.exit_json(msg="GET operation complete", changed=True) + except s3.provider.storage_copy_error, e: + module.fail_json(msg= str(e)) + except SSLError as e: + # actually fail on last pass through the loop. + if x == retries: + module.fail_json(msg="s3 download failed; %s" % e) + # otherwise, try again, this may be a transient timeout. 
+ pass def download_s3str(module, s3, bucket, obj): try: @@ -292,7 +309,8 @@ def main(): expiry = dict(default=600, aliases=['expiration']), s3_url = dict(aliases=['S3_URL']), overwrite = dict(aliases=['force'], default=True, type='bool'), - metadata = dict(type='dict'), + metadata = dict(type='dict'), + retries = dict(aliases=['retry'], type='str', default=0), ), ) module = AnsibleModule(argument_spec=argument_spec) @@ -307,6 +325,7 @@ def main(): s3_url = module.params.get('s3_url') overwrite = module.params.get('overwrite') metadata = module.params.get('metadata') + retries = int(module.params.get('retries')) ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module) @@ -368,7 +387,7 @@ def main(): # If the destination path doesn't exist, no need to md5um etag check, so just download. pathrtn = path_check(dest) if pathrtn is False: - download_s3file(module, s3, bucket, obj, dest) + download_s3file(module, s3, bucket, obj, dest, retries) # Compare the remote MD5 sum of the object with the local dest md5sum, if it already exists. if pathrtn is True: @@ -377,13 +396,13 @@ def main(): if md5_local == md5_remote: sum_matches = True if overwrite is True: - download_s3file(module, s3, bucket, obj, dest) + download_s3file(module, s3, bucket, obj, dest, retries) else: module.exit_json(msg="Local and remote object are identical, ignoring. Use overwrite parameter to force.", changed=False) else: sum_matches = False if overwrite is True: - download_s3file(module, s3, bucket, obj, dest) + download_s3file(module, s3, bucket, obj, dest, retries) else: module.fail_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force download.", failed=True) @@ -393,7 +412,7 @@ def main(): # At this point explicitly define the overwrite condition. 
if sum_matches is True and pathrtn is True and overwrite is True: - download_s3file(module, s3, bucket, obj, dest) + download_s3file(module, s3, bucket, obj, dest, retries) # If sum does not match but the destination exists, we From 9e381264ae599788f77a629ce3ffc7d24cf7c20a Mon Sep 17 00:00:00 2001 From: "Federico G. Schwindt" Date: Wed, 7 Jan 2015 04:47:58 +0000 Subject: [PATCH 013/464] Document globbing support --- commands/command.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/commands/command.py b/commands/command.py index bc286d6855d..f9d2b18c921 100644 --- a/commands/command.py +++ b/commands/command.py @@ -47,12 +47,12 @@ options: aliases: [] creates: description: - - a filename, when it already exists, this step will B(not) be run. + - a filename or glob pattern, when it already exists, this step will B(not) be run. required: no default: null removes: description: - - a filename, when it does not exist, this step will B(not) be run. + - a filename or glob pattern, when it does not exist, this step will B(not) be run. version_added: "0.8" required: no default: null From a8b8128aac9f51241c9a3da74ee28aa59c9ead13 Mon Sep 17 00:00:00 2001 From: "Federico G. Schwindt" Date: Thu, 8 Jan 2015 02:06:47 +0000 Subject: [PATCH 014/464] Remove skipped=True when using creates and removes Based on #8645 --- network/basics/uri.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/network/basics/uri.py b/network/basics/uri.py index aac724a8f13..9be0a06cdce 100644 --- a/network/basics/uri.py +++ b/network/basics/uri.py @@ -381,7 +381,7 @@ def main(): # of uri executions. 
creates = os.path.expanduser(creates) if os.path.exists(creates): - module.exit_json(stdout="skipped, since %s exists" % creates, skipped=True, changed=False, stderr=False, rc=0) + module.exit_json(stdout="skipped, since %s exists" % creates, changed=False, stderr=False, rc=0) if removes is not None: # do not run the command if the line contains removes=filename @@ -389,7 +389,7 @@ def main(): # of uri executions. v = os.path.expanduser(removes) if not os.path.exists(removes): - module.exit_json(stdout="skipped, since %s does not exist" % removes, skipped=True, changed=False, stderr=False, rc=0) + module.exit_json(stdout="skipped, since %s does not exist" % removes, changed=False, stderr=False, rc=0) # httplib2 only sends authentication after the server asks for it with a 401. From e2abbbefd7d654f7ee05e18828fc1d0d1427a67b Mon Sep 17 00:00:00 2001 From: Jan Brauer Date: Wed, 21 Jan 2015 13:36:36 +0100 Subject: [PATCH 015/464] Handle the case where a stack rollback fails. --- cloud/amazon/cloudformation.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/cloud/amazon/cloudformation.py b/cloud/amazon/cloudformation.py index b382e3f05ff..1f4d77911a8 100644 --- a/cloud/amazon/cloudformation.py +++ b/cloud/amazon/cloudformation.py @@ -185,6 +185,11 @@ def stack_operation(cfn, stack_name, operation): events = map(str, list(stack.describe_events())), output = 'Stack %s failed' % operation) break + elif '%s_ROLLBACK_FAILED' % operation == stack.stack_status: + result = dict(changed=True, failed=True, + events = map(str, list(stack.describe_events())), + output = 'Stack %s rollback failed' % operation) + break else: time.sleep(5) return result From a935baf7dd24f1f4dd95ca39b8cbbd1c3f17ac66 Mon Sep 17 00:00:00 2001 From: Annika Backstrom Date: Thu, 22 Jan 2015 10:51:09 -0500 Subject: [PATCH 016/464] Force redownload if sha256sum does not match --- network/basics/get_url.py | 33 +++++++++++++++++++++++---------- 1 file changed, 23 insertions(+), 10 deletions(-) diff --git 
a/network/basics/get_url.py b/network/basics/get_url.py index b0d27859420..1fdb90a9da9 100644 --- a/network/basics/get_url.py +++ b/network/basics/get_url.py @@ -217,8 +217,29 @@ def main(): dest_is_dir = os.path.isdir(dest) last_mod_time = None + # Remove any non-alphanumeric characters, including the infamous + # Unicode zero-width space + stripped_sha256sum = re.sub(r'\W+', '', sha256sum) + + # Fail early if sha256 is not supported + if sha256sum != '' and not HAS_HASHLIB: + module.fail_json(msg="The sha256sum parameter requires hashlib, which is available in Python 2.5 and higher") + if not dest_is_dir and os.path.exists(dest): - if not force: + checksum_mismatch = False + + # If the download is not forced and there is a checksum, allow + # checksum match to skip the download. + if not force and sha256sum != '': + destination_checksum = module.sha256(dest) + + if stripped_sha256sum.lower() == destination_checksum: + module.exit_json(msg="file already exists", dest=dest, url=url, changed=False) + + checksum_mismatch = True + + # Not forcing redownload, unless sha256sum has already failed + if not force and not checksum_mismatch: module.exit_json(msg="file already exists", dest=dest, url=url, changed=False) # If the file already exists, prepare the last modified time for the @@ -281,15 +302,7 @@ def main(): # Check the digest of the destination file and ensure that it matches the # sha256sum parameter if it is present if sha256sum != '': - # Remove any non-alphanumeric characters, including the infamous - # Unicode zero-width space - stripped_sha256sum = re.sub(r'\W+', '', sha256sum) - - if not HAS_HASHLIB: - os.remove(dest) - module.fail_json(msg="The sha256sum parameter requires hashlib, which is available in Python 2.5 and higher") - else: - destination_checksum = module.sha256(dest) + destination_checksum = module.sha256(dest) if stripped_sha256sum.lower() != destination_checksum: os.remove(dest) From fd061d437c8568f02f5997fd3e7d2b49202b1c5d Mon Sep 17 
00:00:00 2001 From: Wouter Bolsterlee Date: Mon, 12 Jan 2015 13:38:42 +0100 Subject: [PATCH 017/464] Fix typo in `version_added` field. --- packaging/language/pip.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/language/pip.py b/packaging/language/pip.py index 97576a5258b..b8c01ba5247 100644 --- a/packaging/language/pip.py +++ b/packaging/language/pip.py @@ -63,7 +63,7 @@ options: default: "no" choices: [ "yes", "no" ] virtualenv_command: - version_aded: "1.1" + version_added: "1.1" description: - The command or a pathname to the command to create the virtual environment with. For example C(pyvenv), C(virtualenv), From 19b49c1d9614fc2680f7d8e752f9eafdeac3a70c Mon Sep 17 00:00:00 2001 From: Wouter Bolsterlee Date: Wed, 28 Jan 2015 12:45:33 +0100 Subject: [PATCH 018/464] A few coding style cleanups --- packaging/language/pip.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/packaging/language/pip.py b/packaging/language/pip.py index b8c01ba5247..e56c7ef7abd 100644 --- a/packaging/language/pip.py +++ b/packaging/language/pip.py @@ -282,14 +282,14 @@ def main(): cmd = '%s %s' % (pip, state_map[state]) # If there's a virtualenv we want things we install to be able to use other - # installations that exist as binaries within this virtualenv. Example: we - # install cython and then gevent -- gevent needs to use the cython binary, - # not just a python package that will be found by calling the right python. + # installations that exist as binaries within this virtualenv. Example: we + # install cython and then gevent -- gevent needs to use the cython binary, + # not just a python package that will be found by calling the right python. # So if there's a virtualenv, we add that bin/ to the beginning of the PATH # in run_command by setting path_prefix here. 
path_prefix = None if env: - path_prefix="/".join(pip.split('/')[:-1]) + path_prefix = "/".join(pip.split('/')[:-1]) # Automatically apply -e option to extra_args when source is a VCS url. VCS # includes those beginning with svn+, git+, hg+ or bzr+ @@ -350,7 +350,8 @@ def main(): changed = 'Successfully installed' in out_pip module.exit_json(changed=changed, cmd=cmd, name=name, version=version, - state=state, requirements=requirements, virtualenv=env, stdout=out, stderr=err) + state=state, requirements=requirements, virtualenv=env, + stdout=out, stderr=err) # import module snippets from ansible.module_utils.basic import * From 8ce0284ace74cb12af4b264d95dfe4f7af7c1a70 Mon Sep 17 00:00:00 2001 From: Wouter Bolsterlee Date: Wed, 28 Jan 2015 12:45:25 +0100 Subject: [PATCH 019/464] Add a virtualenv_python argument to the pip module This allows specifying the Python version to use when creating the virtualenv. See issue #586. --- packaging/language/pip.py | 29 +++++++++++++++++++++-------- 1 file changed, 21 insertions(+), 8 deletions(-) diff --git a/packaging/language/pip.py b/packaging/language/pip.py index e56c7ef7abd..242a815a93f 100644 --- a/packaging/language/pip.py +++ b/packaging/language/pip.py @@ -70,6 +70,14 @@ options: C(virtualenv2), C(~/bin/virtualenv), C(/usr/local/bin/virtualenv). required: false default: virtualenv + virtualenv_python: + version_added: "FIXME" + description: + - The Python executable used for creating the virtual environment. + For example C(python3.4), C(python2.7). When not specified, the + system Python version is used. 
+ required: false + default: null state: description: - The state of module @@ -224,6 +232,7 @@ def main(): virtualenv=dict(default=None, required=False), virtualenv_site_packages=dict(default='no', type='bool'), virtualenv_command=dict(default='virtualenv', required=False), + virtualenv_python=dict(default=None, required=False, type='str'), use_mirrors=dict(default='yes', type='bool'), extra_args=dict(default=None, required=False), chdir=dict(default=None, required=False), @@ -239,6 +248,7 @@ def main(): version = module.params['version'] requirements = module.params['requirements'] extra_args = module.params['extra_args'] + virtualenv_python = module.params['virtualenv_python'] chdir = module.params['chdir'] if state == 'latest' and version is not None: @@ -256,18 +266,21 @@ def main(): if module.check_mode: module.exit_json(changed=True) - virtualenv = os.path.expanduser(virtualenv_command) - if os.path.basename(virtualenv) == virtualenv: - virtualenv = module.get_bin_path(virtualenv_command, True) + cmd = os.path.expanduser(virtualenv_command) + if os.path.basename(cmd) == cmd: + cmd = module.get_bin_path(virtualenv_command, True) if module.params['virtualenv_site_packages']: - cmd = '%s --system-site-packages %s' % (virtualenv, env) + cmd += ' --system-site-packages' else: - cmd_opts = _get_cmd_options(module, virtualenv) + cmd_opts = _get_cmd_options(module, cmd) if '--no-site-packages' in cmd_opts: - cmd = '%s --no-site-packages %s' % (virtualenv, env) - else: - cmd = '%s %s' % (virtualenv, env) + cmd += ' --no-site-packages' + + if virtualenv_python: + cmd += ' -p%s' % virtualenv_python + + cmd = "%s %s" % (cmd, env) this_dir = tempfile.gettempdir() if chdir: this_dir = os.path.join(this_dir, chdir) From eab438ce5fda8e10009facb0670aaa05b0e55a55 Mon Sep 17 00:00:00 2001 From: Raghu Udiyar Date: Sat, 31 Jan 2015 14:12:49 +0530 Subject: [PATCH 020/464] Creating ami should be idempotent If the ami already exists, return details of the duplicate ami instead of 
failing with an error. --- cloud/amazon/ec2_ami.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/cloud/amazon/ec2_ami.py b/cloud/amazon/ec2_ami.py index ab1f986356b..c2f94ad23bd 100644 --- a/cloud/amazon/ec2_ami.py +++ b/cloud/amazon/ec2_ami.py @@ -20,7 +20,7 @@ module: ec2_ami version_added: "1.3" short_description: create or destroy an image in ec2 description: - - Creates or deletes ec2 images. + - Creates or deletes ec2 images. options: instance_id: description: @@ -162,7 +162,16 @@ def create_image(module, ec2): image_id = ec2.create_image(**params) except boto.exception.BotoServerError, e: - module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) + if e.error_code == 'InvalidAMIName.Duplicate': + images = ec2.get_all_images() + for img in images: + if img.name == name: + module.exit_json(msg="AMI name already present", image_id=img.id, state=img.state, changed=False) + sys.exit(0) + else: + module.fail_json(msg="Error in retrieving duplicate AMI details") + else: + module.fail_json(msg="%s: %s" % (e.error_code, e.error_message)) # Wait until the image is recognized. EC2 API has eventual consistency, # such that a successful CreateImage API call doesn't guarantee the success From 6d6e948f1e869811f99eb9b3402234ccf3f24716 Mon Sep 17 00:00:00 2001 From: Alexis Seigneurin Date: Mon, 2 Feb 2015 14:51:04 +0100 Subject: [PATCH 021/464] - 'name' should not be required so as to allow uninstalling a cron_file --- system/cron.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/system/cron.py b/system/cron.py index c0a39b61c61..ed62674c01c 100644 --- a/system/cron.py +++ b/system/cron.py @@ -46,7 +46,7 @@ options: description: - Description of a crontab entry. default: null - required: true + required: false user: description: - The specific user whose crontab should be modified. 
@@ -397,7 +397,7 @@ def main(): module = AnsibleModule( argument_spec = dict( - name=dict(required=True), + name=dict(required=False), user=dict(required=False), job=dict(required=False), cron_file=dict(required=False), From cf0e8d62d255481dc63a337408d35666991d0236 Mon Sep 17 00:00:00 2001 From: Jesse Keating Date: Thu, 5 Feb 2015 15:23:35 -0800 Subject: [PATCH 022/464] Add service option to avoid failure on missing service This adds a must_exist option to the service module, which gives callers the ability to be tolerant to services that do not exist. This allows for opportunistic manipulation of a list of services if they happen to exist on the host. While failed_when could be used, it's difficult to track all the different error strings that might come from various service tools regarding a missing service. --- system/service.py | 24 ++++++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/system/service.py b/system/service.py index e1bd250a1ee..e08d404e2b4 100644 --- a/system/service.py +++ b/system/service.py @@ -72,6 +72,14 @@ options: description: - Additional arguments provided on the command line aliases: [ 'args' ] + must_exist: + required: false + default: true + version_added: "1.9" + description: + - Avoid a module failure if the named service does not exist. Useful + for opportunistically starting/stopping/restarting a list of + potential services. 
''' EXAMPLES = ''' @@ -95,6 +103,9 @@ EXAMPLES = ''' # Example action to restart network service for interface eth0 - service: name=network state=restarted args=eth0 + +# Example action to restart nova-compute if it exists +- service: name=nova-compute state=restarted must_exist=no ''' import platform @@ -465,7 +476,11 @@ class LinuxService(Service): self.enable_cmd = location['chkconfig'] if self.enable_cmd is None: - self.module.fail_json(msg="no service or tool found for: %s" % self.name) + if self.module.params['must_exist']: + self.module.fail_json(msg="no service or tool found for: %s" % self.name) + else: + # exiting without change on non-existent service + self.module.exit_json(changed=False, exists=False) # If no service control tool selected yet, try to see if 'service' is available if not self.svc_cmd and location.get('service', False): @@ -473,7 +488,11 @@ class LinuxService(Service): # couldn't find anything yet if self.svc_cmd is None and not self.svc_initscript: - self.module.fail_json(msg='cannot find \'service\' binary or init script for service, possible typo in service name?, aborting') + if self.module.params['must_exist']: + self.module.fail_json(msg='cannot find \'service\' binary or init script for service, possible typo in service name?, aborting') + else: + # exiting without change on non-existent service + self.module.exit_json(changed=False, exists=False) if location.get('initctl', False): self.svc_initctl = location['initctl'] @@ -1361,6 +1380,7 @@ def main(): enabled = dict(type='bool'), runlevel = dict(required=False, default='default'), arguments = dict(aliases=['args'], default=''), + must_exist = dict(type='bool', default=True), ), supports_check_mode=True ) From bec0f066654ef589dc983abdbf59162896881b57 Mon Sep 17 00:00:00 2001 From: Gugli Date: Fri, 13 Feb 2015 15:06:15 +0100 Subject: [PATCH 023/464] Add support for repos with svn:externals files When a SVN repository has some svn:externals properties, files will be reported with 
the X attribute, and lines will be added at the end to list externals statuses with a text looking like "Performing status on external item at ....". Such lines were counted as a local modification by the regex, and the module returned a change, even though there were none. To have a clean (and parsable) "svn status" output, it is recommended to use the --quiet option. The externals will only appear if they have been modified. With this option on, it seems even safer to consider there are local modifications when "svn status" outputs anything. --- source_control/subversion.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/source_control/subversion.py b/source_control/subversion.py index f4a0f65fd78..7d49d0a2272 100644 --- a/source_control/subversion.py +++ b/source_control/subversion.py @@ -153,11 +153,10 @@ class Subversion(object): def has_local_mods(self): '''True if revisioned files have been added or modified. Unrevisioned files are ignored.''' - lines = self._exec(["status", self.dest]) - # Match only revisioned files, i.e. ignore status '?'. - regex = re.compile(r'^[^?]') + lines = self._exec(["status", "--quiet", self.dest]) + # The --quiet option will return only modified files. # Has local mods if more than 0 modifed revisioned files. - return len(filter(regex.match, lines)) > 0 + return len(filter(len, lines)) > 0 def needs_update(self): curr, url = self.get_revision() From 80da041eb61c8397b21f0e06d26c7b2c58745417 Mon Sep 17 00:00:00 2001 From: Jens Carl Date: Fri, 20 Feb 2015 17:22:03 +0000 Subject: [PATCH 024/464] - List the name servers of a zone. 
--- cloud/amazon/route53.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/cloud/amazon/route53.py b/cloud/amazon/route53.py index 7fbe8552f41..9454a7ba81b 100644 --- a/cloud/amazon/route53.py +++ b/cloud/amazon/route53.py @@ -248,7 +248,13 @@ def main(): module.exit_json(changed=False) if command_in == 'get': - module.exit_json(changed=False, set=record) + if type_in == 'NS': + ns = record['values'] + else: + # Retrieve name servers associated to the zone. + ns = conn.get_zone(zone_in).get_nameservers() + + module.exit_json(changed=False, set=record, nameservers=ns) if command_in == 'delete' and not found_record: module.exit_json(changed=False) From d9f8fa56d8af9de402ee9f48ea832709b20754a8 Mon Sep 17 00:00:00 2001 From: gimoh Date: Mon, 23 Feb 2015 14:14:00 +0000 Subject: [PATCH 025/464] Do not insert extra newline if line already contains it When using YAML multi-line strings, e.g.: - lineinfile: dest: /tmp/foo line: > foo bar the line already ends with a newline. If an extra newline is appended unconditionally it will lead to inserting an extra newline on each run. --- files/lineinfile.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/files/lineinfile.py b/files/lineinfile.py index b9fc628e10c..480811dbbfa 100644 --- a/files/lineinfile.py +++ b/files/lineinfile.py @@ -242,8 +242,11 @@ def present(module, dest, regexp, line, insertafter, insertbefore, create, # Don't do backref expansion if not asked. new_line = line - if lines[index[0]] != new_line + os.linesep: - lines[index[0]] = new_line + os.linesep + if not new_line.endswith(os.linesep): + new_line += os.linesep + + if lines[index[0]] != new_line: + lines[index[0]] = new_line msg = 'line replaced' changed = True elif backrefs: From 07d562c58a85dec379da5167c98fcc8fba367a87 Mon Sep 17 00:00:00 2001 From: Trevor Pounds Date: Fri, 27 Feb 2015 01:05:47 -0800 Subject: [PATCH 026/464] Instance deregistration should try all associated ELBs. 
Fixes #869 --- cloud/amazon/ec2_elb.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cloud/amazon/ec2_elb.py b/cloud/amazon/ec2_elb.py index 11abd827b2b..5e858cc9c3d 100644 --- a/cloud/amazon/ec2_elb.py +++ b/cloud/amazon/ec2_elb.py @@ -130,9 +130,9 @@ class ElbManager: for lb in self.lbs: initial_state = self._get_instance_health(lb) if initial_state is None: - # The instance isn't registered with this ELB so just - # return unchanged - return + # Instance isn't registered with this load + # balancer. Ignore it and try the next one. + continue lb.deregister_instances([self.instance_id]) From 19e7e994706d0c0bc603db0bd5ed8f1da56048f6 Mon Sep 17 00:00:00 2001 From: Jonathan Davila Date: Tue, 3 Mar 2015 15:23:06 -0500 Subject: [PATCH 027/464] iam_policy --- cloud/amazon/iam_policy.py | 325 +++++++++++++++++++++++++++++++++++++ 1 file changed, 325 insertions(+) create mode 100644 cloud/amazon/iam_policy.py diff --git a/cloud/amazon/iam_policy.py b/cloud/amazon/iam_policy.py new file mode 100644 index 00000000000..277877a7254 --- /dev/null +++ b/cloud/amazon/iam_policy.py @@ -0,0 +1,325 @@ +#!/usr/bin/env python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +DOCUMENTATION = ''' +--- +module: iam_policy +short_description: Manage IAM policies for users, groups, and roles +description: + - Allows uploading or removing IAM policies for IAM users, groups or roles. 
+version_added: "1.9" +options: + iam_type: + description: + - Type of IAM resource + required: true + default: null + choices: [ "user", "group", "role"] + aliases: [] + iam_name: + description: + - Name of IAM resource you wish to target for policy actions. In other words, the user name, group name or role name. + required: true + aliases: [] + policy_name: + description: + - The name label for the policy to create or remove. + required: false + aliases: [] + policy_document: + description: + - The path to the properly json formatted policy file + required: false + aliases: [] + state: + description: + - Whether to create or delete the IAM policy. + required: true + default: null + choices: [ "present", "absent"] + aliases: [] + skip_duplicates: + description: + - By default the module looks for any policies that match the document you pass in, if there is a match it will not make a new policy object with the same rules. You can override this by specifying false which would allow for two policy objects with different names but same rules. + required: false + default: "/" + aliases: [] + aws_secret_key: + description: + - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used. + required: false + default: null + aliases: [ 'ec2_secret_key', 'secret_key' ] + aws_access_key: + description: + - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used. + required: false + default: null + aliases: [ 'ec2_access_key', 'access_key' ] + + +requirements: [ "boto" ] +notes: + - 'Currently boto does not support the removal of Managed Policies, the module will not work removing/adding managed policies.' +author: Jonathan I. 
Davila and Paul Seiffert +''' + +EXAMPLES = ''' +# Create and policy with the name of 'Admin' to the group 'administrators' +tasks: +- name: Create two new IAM users with API keys + iam_policy: + iam_type: group + iam_name: administrators + policy_name: Admin + state: present + policy_document: admin_policy.json + +# Advanced example, create two new groups and add a READ-ONLY policy to both +# groups. +task: +- name: Create Two Groups, Mario and Luigi + iam: + iam_type: group + name: "{{ item }}" + state: present + with_items: + - Mario + - Luigi + register: new_groups + +- name: + iam_policy: + iam_type: group + iam_name: "{{ item.created_group.group_name }}" + policy_name: "READ-ONLY" + policy_document: readonlypolicy.json + state: present + with_items: new_groups.results + +''' +import json +import urllib +import sys +try: + import boto + import boto.iam +except ImportError: + print "failed=True msg='boto required for this module'" + sys.exit(1) + +def boto_exception(err): + '''generic error message handler''' + if hasattr(err, 'error_message'): + error = err.error_message + elif hasattr(err, 'message'): + error = err.message + else: + error = '%s: %s' % (Exception, err) + + return error + + +def user_action(module, iam, name, policy_name, skip, pdoc, state): + policy_match = False + changed = False + try: + current_policies = [cp for cp in iam.get_all_user_policies(name). + list_user_policies_result. + policy_names] + for pol in current_policies: + ''' + urllib is needed here because boto returns url encoded strings instead + ''' + if urllib.unquote(iam.get_user_policy(name, pol). + get_user_policy_result.policy_document) == pdoc: + policy_match = True + if policy_match: + msg=("The policy document you specified already exists " + "under the name %s." 
% pol) + if state == 'present' and skip: + if policy_name not in current_policies and not policy_match: + changed = True + iam.put_user_policy(name, policy_name, pdoc) + elif state == 'present' and not skip: + changed = True + iam.put_user_policy(name, policy_name, pdoc) + elif state == 'absent': + try: + iam.delete_user_policy(name, policy_name) + changed = True + except boto.exception.BotoServerError, err: + error_msg = boto_exception(err) + if 'cannot be found.' in error_msg: + changed = False + module.exit_json(changed=changed, msg="%s policy is already absent" % policy_name) + + updated_policies = [cp for cp in iam.get_all_user_policies(name). + list_user_policies_result. + policy_names] + except boto.exception.BotoServerError, err: + error_msg = boto_exception(err) + module.fail_json(changed=changed, msg=error_msg) + + return changed, name, updated_policies + + +def role_action(module, iam, name, policy_name, skip, pdoc, state): + policy_match = False + changed = False + try: + current_policies = [cp for cp in iam.list_role_policies(name). + list_role_policies_result. + policy_names] + for pol in current_policies: + if urllib.unquote(iam.get_role_policy(name, pol). + get_role_policy_result.policy_document) == pdoc: + policy_match = True + if policy_match: + msg=("The policy document you specified already exists " + "under the name %s." % pol) + if state == 'present' and skip: + if policy_name not in current_policies and not policy_match: + changed = True + iam.put_role_policy(name, policy_name, pdoc) + elif state == 'present' and not skip: + changed = True + iam.put_role_policy(name, policy_name, pdoc) + elif state == 'absent': + try: + iam.delete_role_policy(name, policy_name) + changed = True + except boto.exception.BotoServerError, err: + error_msg = boto_exception(err) + if 'cannot be found.' 
in error_msg: + changed = False + module.exit_json(changed=changed, + msg="%s policy is already absent" % policy_name) + + updated_policies = [cp for cp in iam.list_role_policies(name). + list_role_policies_result. + policy_names] + except boto.exception.BotoServerError, err: + error_msg = boto_exception(err) + module.fail_json(changed=changed, msg=error_msg) + + return changed, name, updated_policies + + +def group_action(module, iam, name, policy_name, skip, pdoc, state): + policy_match = False + changed = False + msg='' + try: + current_policies = [cp for cp in iam.get_all_group_policies(name). + list_group_policies_result. + policy_names] + for pol in current_policies: + if urllib.unquote(iam.get_group_policy(name, pol). + get_group_policy_result.policy_document) == pdoc: + policy_match = True + if policy_match: + msg=("The policy document you specified already exists " + "under the name %s." % pol) + if state == 'present' and skip: + if policy_name not in current_policies and not policy_match: + changed = True + iam.put_group_policy(name, policy_name, pdoc) + elif state == 'present' and not skip: + changed = True + iam.put_group_policy(name, policy_name, pdoc) + elif state == 'absent': + try: + iam.delete_group_policy(name, policy_name) + changed = True + except boto.exception.BotoServerError, err: + error_msg = boto_exception(err) + if 'cannot be found.' in error_msg: + changed = False + module.exit_json(changed=changed, + msg="%s policy is already absent" % policy_name) + + updated_policies = [cp for cp in iam.get_all_group_policies(name). + list_group_policies_result. 
+ policy_names] + except boto.exception.BotoServerError, err: + error_msg = boto_exception(err) + module.fail_json(changed=changed, msg=error_msg) + + return changed, name, updated_policies, msg + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + iam_type=dict( + default=None, required=True, choices=['user', 'group', 'role']), + state=dict( + default=None, required=True, choices=['present', 'absent']), + iam_name=dict(default=None, required=False), + policy_name=dict(default=None, required=True), + policy_document=dict(default=None, required=False), + skip_duplicates=dict(type='bool', default=True, required=False) + )) + + module = AnsibleModule( + argument_spec=argument_spec, + ) + + state = module.params.get('state').lower() + iam_type = module.params.get('iam_type').lower() + state = module.params.get('state') + name = module.params.get('iam_name') + policy_name = module.params.get('policy_name') + skip = module.params.get('skip_duplicates') + if module.params.get('policy_document') != None: + with open(module.params.get('policy_document'), 'r') as json_data: + pdoc = json.dumps(json.load(json_data)) + json_data.close() + else: + pdoc=None + + ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module) + + try: + iam = boto.iam.connection.IAMConnection( + aws_access_key_id=aws_access_key, + aws_secret_access_key=aws_secret_key, + ) + except boto.exception.NoAuthHandlerFound, e: + module.fail_json(msg=str(e)) + + changed = False + + if iam_type == 'user': + changed, user_name, current_policies = user_action(module, iam, name, + policy_name, skip, pdoc, + state) + module.exit_json(changed=changed, user_name=name, policies=current_policies) + elif iam_type == 'role': + changed, role_name, current_policies = role_action(module, iam, name, + policy_name, skip, pdoc, + state) + module.exit_json(changed=changed, role_name=name, policies=current_policies) + elif iam_type == 'group': + changed, group_name, current_policies, 
msg = group_action(module, iam, name, + policy_name, skip, pdoc, + state) + module.exit_json(changed=changed, group_name=name, policies=current_policies, msg=msg) + +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import * + +main() \ No newline at end of file From 19ec77068074faffa9e2082230541a1c63729700 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sun, 8 Mar 2015 11:47:08 -0400 Subject: [PATCH 028/464] added updated cache time to apt, also started documenting return values --- packaging/os/apt.py | 56 +++++++++++++++++++++++++++++++++------------ 1 file changed, 41 insertions(+), 15 deletions(-) diff --git a/packaging/os/apt.py b/packaging/os/apt.py index 8730e22e35d..281175d986e 100644 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -138,6 +138,28 @@ EXAMPLES = ''' - apt: pkg=foo state=build-dep ''' +RETURN = ''' +cache_updated: + description: if the cache was updated or not + returned: success, in some cases + type: boolean + sample: True +cache_update_time: + description: time of the last cache update (0 if unknown) + returned: success, in some cases + type: datetime + sample: 1425828348000 +stdout: + description: output from apt + returned: success, when needed + type: string + sample: "Reading package lists...\nBuilding dependency tree...\nReading state information...\nThe following extra packages will be installed:\n apache2-bin ..." +stderr: + description: error output from apt + returned: success, when needed + type: string + sample: "AH00558: apache2: Could not reliably determine the server's fully qualified domain name, using 127.0.1.1. Set the 'ServerName' directive globally to ..." +''' import traceback # added to stave off future warnings about apt api @@ -536,6 +558,8 @@ def main(): if not APTITUDE_CMD and p.get('upgrade', None) in [ 'full', 'safe', 'yes' ]: module.fail_json(msg="Could not find aptitude. 
Please ensure it is installed.") + updated_cache = False + updated_cache_time = 0 install_recommends = p['install_recommends'] dpkg_options = expand_dpkg_options(p['dpkg_options']) @@ -558,41 +582,41 @@ def main(): if p['update_cache']: # Default is: always update the cache cache_valid = False - if p['cache_valid_time']: - tdelta = datetime.timedelta(seconds=p['cache_valid_time']) + now = datetime.datetime.now() + if p.get('cache_valid_time', False): try: mtime = os.stat(APT_UPDATE_SUCCESS_STAMP_PATH).st_mtime except: - mtime = False - if mtime is False: # Looks like the update-success-stamp is not available # Fallback: Checking the mtime of the lists try: mtime = os.stat(APT_LISTS_PATH).st_mtime except: + # No mtime could be read. We update the cache to be safe mtime = False - if mtime is False: - # No mtime could be read - looks like lists are not there - # We update the cache to be safe - cache_valid = False - else: + + if mtime: + tdelta = datetime.timedelta(seconds=p['cache_valid_time']) mtimestamp = datetime.datetime.fromtimestamp(mtime) - if mtimestamp + tdelta >= datetime.datetime.now(): - # dont update the cache - # the old cache is less than cache_valid_time seconds old - so still valid + if mtimestamp + tdelta >= now: cache_valid = True + updated_cache_time = int(time.mktime(mtimestamp.timetuple())) if cache_valid is not True: cache.update() cache.open(progress=None) + updated_cache = True + updated_cache_time = int(time.mktime(now.timetuple())) if not p['package'] and not p['upgrade'] and not p['deb']: - module.exit_json(changed=False) + module.exit_json(changed=False, cache_updated=updated_cache, cache_update_time=updated_cache_time) + else: + updated_cache = False + updated_cache_time = 0 force_yes = p['force'] if p['upgrade']: - upgrade(module, p['upgrade'], force_yes, - p['default_release'], dpkg_options) + upgrade(module, p['upgrade'], force_yes, p['default_release'], dpkg_options) if p['deb']: if p['state'] != 'present': @@ -622,6 +646,8 @@ def 
main(): force=force_yes, dpkg_options=dpkg_options, build_dep=state_builddep) (success, retvals) = result + retvals['cache_updated']=updated_cache + retvals['cache_update_time']=updated_cache_time if success: module.exit_json(**retvals) else: From c3f92cca210db1f7042bfce1ff90645255f0b49e Mon Sep 17 00:00:00 2001 From: Stefan Junker Date: Thu, 12 Mar 2015 12:55:14 +0100 Subject: [PATCH 029/464] docker: Use a list instead of a dict for volumes according to the docker-py docs. Do not add host-binds to volumes list. --- cloud/docker/docker.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 6e571a7ba5d..fcc14b5a5e0 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -474,13 +474,13 @@ class DockerManager(object): self.volumes = None if self.module.params.get('volumes'): self.binds = {} - self.volumes = {} + self.volumes = [] vols = self.module.params.get('volumes') for vol in vols: parts = vol.split(":") # regular volume if len(parts) == 1: - self.volumes[parts[0]] = {} + self.volumes.append(parts[0]) # host mount (e.g. /mnt:/tmp, bind mounts host's /tmp to /mnt in the container) elif 2 <= len(parts) <= 3: # default to read-write From 83c6cd04f48c6388a075af5d9a709667b8f007b9 Mon Sep 17 00:00:00 2001 From: Tagir Bakirov Date: Fri, 13 Mar 2015 11:07:13 +0100 Subject: [PATCH 030/464] added 'absent' option to supervisorctl --- web_infrastructure/supervisorctl.py | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/web_infrastructure/supervisorctl.py b/web_infrastructure/supervisorctl.py index f75992b9a6a..c3b52d0a79d 100644 --- a/web_infrastructure/supervisorctl.py +++ b/web_infrastructure/supervisorctl.py @@ -64,7 +64,7 @@ options: - The desired state of program/group. 
required: true default: null - choices: [ "present", "started", "stopped", "restarted" ] + choices: [ "present", "started", "stopped", "restarted", "absent" ] supervisorctl_path: description: - path to supervisorctl executable @@ -101,7 +101,7 @@ def main(): username=dict(required=False), password=dict(required=False), supervisorctl_path=dict(required=False), - state=dict(required=True, choices=['present', 'started', 'restarted', 'stopped']) + state=dict(required=True, choices=['present', 'started', 'restarted', 'stopped', 'absent']) ) module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True) @@ -196,6 +196,19 @@ def main(): processes = get_matched_processes() + if state == 'absent': + if len(processes) == 0: + module.exit_json(changed=False, name=name, state=state) + + if module.check_mode: + module.exit_json(changed=True) + run_supervisorctl('reread', check_rc=True) + rc, out, err = run_supervisorctl('remove', name) + if '%s: removed process group' % name in out: + module.exit_json(changed=True, name=name, state=state) + else: + module.fail_json(msg=out, name=name, state=state) + if state == 'present': if len(processes) > 0: module.exit_json(changed=False, name=name, state=state) From 6c1687e68eb424f56eb1157266436f1c68200a40 Mon Sep 17 00:00:00 2001 From: whiter Date: Wed, 18 Mar 2015 14:52:49 +1100 Subject: [PATCH 031/464] Added changed=True flag when new subnet groups created. Added conditional so that modify_db_subnet_group is only called when necessary and changed=True flag will be set. 
--- cloud/amazon/rds_subnet_group.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/cloud/amazon/rds_subnet_group.py b/cloud/amazon/rds_subnet_group.py index 9731154f77c..bfe0fe072d0 100644 --- a/cloud/amazon/rds_subnet_group.py +++ b/cloud/amazon/rds_subnet_group.py @@ -149,10 +149,14 @@ def main(): else: if not exists: new_group = conn.create_db_subnet_group(group_name, desc=group_description, subnet_ids=group_subnets) - + changed = True else: - changed_group = conn.modify_db_subnet_group(group_name, description=group_description, subnet_ids=group_subnets) - + # Sort the subnet groups before we compare them + matching_groups[0].subnet_ids.sort() + group_subnets.sort() + if ( (matching_groups[0].name != group_name) or (matching_groups[0].description != group_description) or (matching_groups[0].subnet_ids != group_subnets) ): + changed_group = conn.modify_db_subnet_group(group_name, description=group_description, subnet_ids=group_subnets) + changed = True except BotoServerError, e: module.fail_json(msg = e.error_message) From 9a6eba43ef9ecb984360d0c620b4ade722c9b248 Mon Sep 17 00:00:00 2001 From: Demian Gemperli Date: Wed, 18 Mar 2015 19:10:54 +0100 Subject: [PATCH 032/464] Allow to add build flags for gem installation --- packaging/language/gem.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/packaging/language/gem.py b/packaging/language/gem.py index 3740a3e7ce3..54d06da7466 100644 --- a/packaging/language/gem.py +++ b/packaging/language/gem.py @@ -73,6 +73,11 @@ options: required: false default: "no" version_added: "1.6" + build_flags: + description: + - Allow adding build flags for gem compilation + required: false + version_added: "2.0" author: Johan Wiren ''' @@ -185,6 +190,8 @@ def install(module): cmd.append('--no-rdoc') cmd.append('--no-ri') cmd.append(module.params['gem_source']) + if module.params['build_flags']: + cmd.extend([ '--', module.params['build_flags'] ]) module.run_command(cmd, 
check_rc=True) def main(): @@ -198,8 +205,9 @@ def main(): repository = dict(required=False, aliases=['source'], type='str'), state = dict(required=False, default='present', choices=['present','absent','latest'], type='str'), user_install = dict(required=False, default=True, type='bool'), - pre_release = dict(required=False, default=False, type='bool'), + pre_release = dict(required=False, default=False, type='bool'), version = dict(required=False, type='str'), + build_flags = dict(required=False, type='str'), ), supports_check_mode = True, mutually_exclusive = [ ['gem_source','repository'], ['gem_source','version'] ], From bdc28a6bb351688f54ffab8ca6ca7d572e4f8f67 Mon Sep 17 00:00:00 2001 From: Ian Clegg Date: Fri, 20 Mar 2015 10:34:36 +0000 Subject: [PATCH 033/464] Added support for comma seperated feature names in the name parameter of the win_feature module --- windows/win_feature.ps1 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/windows/win_feature.ps1 b/windows/win_feature.ps1 index a54007b47bf..458d942e328 100644 --- a/windows/win_feature.ps1 +++ b/windows/win_feature.ps1 @@ -28,7 +28,7 @@ $result = New-Object PSObject -Property @{ } If ($params.name) { - $name = $params.name + $name = $params.name -split ',' | % { $_.Trim() } } Else { Fail-Json $result "mising required argument: name" From 1f358721ffbaa302e7f352abc3d7291a3f238798 Mon Sep 17 00:00:00 2001 From: Isaac Simmons Date: Mon, 23 Mar 2015 11:46:59 -0400 Subject: [PATCH 034/464] Handle ini file properties that contain interpolation errors in the initial values --- files/ini_file.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/files/ini_file.py b/files/ini_file.py index e247c265fc8..ed3bb545702 100644 --- a/files/ini_file.py +++ b/files/ini_file.py @@ -120,6 +120,9 @@ def do_ini(module, filename, section=None, option=None, value=None, state='prese if cp.get(section, option): cp.remove_option(section, option) changed = True + except ConfigParser.InterpolationError: + 
cp.remove_option(section, option) + changed = True except: pass @@ -143,6 +146,9 @@ def do_ini(module, filename, section=None, option=None, value=None, state='prese except ConfigParser.NoOptionError: cp.set(section, option, value) changed = True + except ConfigParser.InterpolationError: + cp.set(section, option, value) + changed = True if changed and not module.check_mode: if backup: From fc4c6594003094cd2b8f6ee92847d03c69205fc5 Mon Sep 17 00:00:00 2001 From: Jesse Sandberg Date: Thu, 26 Mar 2015 16:12:18 +0200 Subject: [PATCH 035/464] Validate variable, return only the found variable value instead of tuple Docs imply the mysql_variables is used to operate a single variable therefore - fail before making any db connections if variable is not set - validate chars for mysql variable name with re.match(^[a-z0-9_]+) - use "SHOW VARIABLE WHERE Variable_name" instead of LIKE search - getvariable() returns only the value or None if variable is not found - the module returns only the found variable value instead of tuple for easier operation eg. 
as registere variable in tasks --- database/mysql/mysql_variables.py | 27 +++++++++++++++------------ 1 file changed, 15 insertions(+), 12 deletions(-) diff --git a/database/mysql/mysql_variables.py b/database/mysql/mysql_variables.py index 199c5eb6eca..7c9ec4459a9 100644 --- a/database/mysql/mysql_variables.py +++ b/database/mysql/mysql_variables.py @@ -67,6 +67,7 @@ EXAMPLES = ''' import ConfigParser import os import warnings +from re import match try: import MySQLdb @@ -103,10 +104,12 @@ def typedvalue(value): def getvariable(cursor, mysqlvar): - cursor.execute("SHOW VARIABLES LIKE %s", (mysqlvar,)) + cursor.execute("SHOW VARIABLES WHERE Variable_name = %s", (mysqlvar,)) mysqlvar_val = cursor.fetchall() - return mysqlvar_val - + if len(mysqlvar_val) is 1: + return mysqlvar_val[0][1] + else: + return None def setvariable(cursor, mysqlvar, value): """ Set a global mysql variable to a given value @@ -116,11 +119,9 @@ def setvariable(cursor, mysqlvar, value): should be passed as numeric literals. 
""" - query = ["SET GLOBAL %s" % mysql_quote_identifier(mysqlvar, 'vars') ] - query.append(" = %s") - query = ' '.join(query) + query = "SET GLOBAL %s = " % mysql_quote_identifier(mysqlvar, 'vars') try: - cursor.execute(query, (value,)) + cursor.execute(query + "%s", (value,)) cursor.fetchall() result = True except Exception, e: @@ -204,6 +205,10 @@ def main(): host = module.params["login_host"] mysqlvar = module.params["variable"] value = module.params["value"] + if mysqlvar is None: + module.fail_json(msg="Cannot run without variable to operate with") + if match('^[0-9a-z_]+$', mysqlvar) is None: + module.fail_json(msg="invalid variable name \"%s\"" % mysqlvar) if not mysqldb_found: module.fail_json(msg="the python mysqldb module is required") else: @@ -232,17 +237,15 @@ def main(): cursor = db_connection.cursor() except Exception, e: module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or ~/.my.cnf has the credentials") - if mysqlvar is None: - module.fail_json(msg="Cannot run without variable to operate with") mysqlvar_val = getvariable(cursor, mysqlvar) + if mysqlvar_val is None: + module.fail_json(msg="Variable not available \"%s\"" % mysqlvar, changed=False) if value is None: module.exit_json(msg=mysqlvar_val) else: - if len(mysqlvar_val) < 1: - module.fail_json(msg="Variable not available", changed=False) # Type values before using them value_wanted = typedvalue(value) - value_actual = typedvalue(mysqlvar_val[0][1]) + value_actual = typedvalue(mysqlvar_val) if value_wanted == value_actual: module.exit_json(msg="Variable already set to requested value", changed=False) try: From 7f3df5634a40ea007b76e6db7bcf8de8dd4df646 Mon Sep 17 00:00:00 2001 From: nemunaire Date: Fri, 27 Mar 2015 18:07:35 +0100 Subject: [PATCH 036/464] Add optional signal parameter when killing docker container --- cloud/docker/docker.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/cloud/docker/docker.py 
b/cloud/docker/docker.py index 85eb0525a69..1f30938964a 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -176,6 +176,12 @@ options: description: - Enable detached mode to leave the container running in background. default: true + signal: + description: + - With the state "killed", you can alter the signal sent to the + container. + required: false + default: KILL state: description: - Assert the container's desired state. "present" only asserts that the @@ -1272,7 +1278,7 @@ class DockerManager(object): def kill_containers(self, containers): for i in containers: - self.client.kill(i['Id']) + self.client.kill(i['Id'], self.module.params.get('signal')) self.increment_counter('killed') def restart_containers(self, containers): @@ -1436,6 +1442,7 @@ def main(): dns = dict(), detach = dict(default=True, type='bool'), state = dict(default='started', choices=['present', 'started', 'reloaded', 'restarted', 'stopped', 'killed', 'absent', 'running']), + signal = dict(default=None), restart_policy = dict(default=None, choices=['always', 'on-failure', 'no']), restart_policy_retry = dict(default=0, type='int'), debug = dict(default=False, type='bool'), From b28459e6f5f5053d7ed0282aa061d994a95feb40 Mon Sep 17 00:00:00 2001 From: Ash Wilson Date: Mon, 30 Mar 2015 15:52:17 -0400 Subject: [PATCH 037/464] Wait for process exit on detached=no. --- cloud/docker/docker.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 7eea7888059..2f5cb8690d9 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -174,7 +174,8 @@ options: default: null detach: description: - - Enable detached mode to leave the container running in background. + - Enable detached mode to leave the container running in background. If + disabled, fail unless the process exits cleanly. 
default: true state: description: @@ -1258,6 +1259,13 @@ class DockerManager(object): self.client.start(i['Id'], **params) self.increment_counter('started') + if not self.module.params.get('detach'): + status = self.client.wait(i['Id']) + if status != 0: + output = self.client.logs(i['Id'], stdout=True, stderr=True, + stream=False, timestamps=False) + self.module.fail_json(status=status, msg=output) + def stop_containers(self, containers): for i in containers: self.client.stop(i['Id']) From 539b7744d0a16895784238414e22f68d6f0d46da Mon Sep 17 00:00:00 2001 From: Dennis Rowe Date: Tue, 31 Mar 2015 09:59:07 -0500 Subject: [PATCH 038/464] Strip spaces around perms --- database/mysql/mysql_user.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/database/mysql/mysql_user.py b/database/mysql/mysql_user.py index 5901771f6ad..0dfee55926d 100644 --- a/database/mysql/mysql_user.py +++ b/database/mysql/mysql_user.py @@ -291,7 +291,7 @@ def privileges_unpack(priv): pieces[0][idx] = "`" + pieces[0][idx] + "`" pieces[0] = '.'.join(pieces[0]) - output[pieces[0]] = pieces[1].upper().split(',') + output[pieces[0]] = map(lambda s: s.strip(), pieces[1].upper().split(',')) new_privs = frozenset(output[pieces[0]]) if not new_privs.issubset(VALID_PRIVS): raise InvalidPrivsError('Invalid privileges specified: %s' % new_privs.difference(VALID_PRIVS)) From 4656797389f2162a304ba24873eab66471143b41 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Tue, 31 Mar 2015 19:28:02 -0400 Subject: [PATCH 039/464] Add Ironic Node module --- cloud/openstack/os_ironic_node.py | 155 ++++++++++++++++++++++++++++++ 1 file changed, 155 insertions(+) create mode 100644 cloud/openstack/os_ironic_node.py diff --git a/cloud/openstack/os_ironic_node.py b/cloud/openstack/os_ironic_node.py new file mode 100644 index 00000000000..386a5f9fe84 --- /dev/null +++ b/cloud/openstack/os_ironic_node.py @@ -0,0 +1,155 @@ +#!/usr/bin/python +# coding: utf-8 -*- + +# (c) 2014, Hewlett-Packard Development 
Company, L.P. +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . + +try: + import shade + HAS_SHADE = True +except ImportError: + HAS_SHADE = False + +DOCUMENTATION = ''' +--- +module: os_ironic_node +version_added: "1.10" +short_description: Activate/Deactivate Bare Metal Resources from OpenStack +extends_documentation_fragment: openstack +description: + - Deploy to nodes controlled by Ironic. +options: + state: + description: + - Indicates desired state of the resource + choices: ['present', 'absent'] + default: present + uuid: + description: + - globally unique identifier (UUID) to be given to the resource. + required: false + default: None + ironic_url: + description: + - If noauth mode is utilized, this is required to be set to the + endpoint URL for the Ironic API. Use with "auth" and "auth_plugin" + settings set to None. + required: false + default: None + config_drive: + description: + - A configdrive file or HTTP(S) URL that will be passed along to the + node. + required: false + default: None + instance_info: + description: + - Definition of the instance information which is used to deploy + the node. + image_source: + description: + - An HTTP(S) URL where the image can be retrieved from. + image_checksum: + description: + - The checksum of image_source. + image_disk_format: + description: + - The type of image that has been requested to be deployed. 
+requirements: ["shade"] +''' + +EXAMPLES = ''' +# Activate a node by booting an image with a configdrive attached +os_ironic_node: + cloud: "openstack" + uuid: "d44666e1-35b3-4f6b-acb0-88ab7052da69" + state: present + config_drive: "http://192.168.1.1/host-configdrive.iso" + instance_info: + image_source: "http://192.168.1.1/deploy_image.img" + image_checksum: "356a6b55ecc511a20c33c946c4e678af" + image_disk_format: "qcow" + delegate_to: localhost +''' + + +def _prepare_instance_info_patch(instance_info): + patch = [] + patch.append({ + 'op': 'replace', + 'path': '/instance_info', + 'value': instance_info + }) + return patch + + +def main(): + argument_spec = openstack_full_argument_spec( + uuid=dict(required=True), + instance_info=dict(type='dict', required=True), + config_drive=dict(required=False), + ironic_url=dict(required=False), + ) + module_kwargs = openstack_module_kwargs() + module = AnsibleModule(argument_spec, **module_kwargs) + if not HAS_SHADE: + module.fail_json(msg='shade is required for this module') + if (module.params['auth_plugin'] == 'None' and + module.params['ironic_url'] is None): + module.fail_json(msg="Authentication appears disabled, Please " + "define an ironic_url parameter") + + if module.params['ironic_url'] and module.params['auth_plugin'] == 'None': + module.params['auth'] = dict(endpoint=module.params['ironic_url']) + + try: + cloud = shade.operator_cloud(**module.params) + server = cloud.get_machine_by_uuid(module.params['uuid']) + instance_info = module.params['instance_info'] + uuid = module.params['uuid'] + if module.params['state'] == 'present': + if server is None: + module.fail_json(msg="node not found") + else: + # TODO: compare properties here and update if necessary + # ... but the interface for that is terrible! 
+ if server.provision_state is "active": + module.exit_json( + changed=False, + result="Node already in an active state" + ) + + patch = _prepare_instance_info_patch(instance_info) + cloud.set_node_instance_info(uuid, patch) + cloud.validate_node(uuid) + cloud.activate_node(uuid, module.params['config_drive']) + # TODO: Add more error checking and a wait option. + module.exit_json(changed=False, result="node activated") + + if module.params['state'] == 'absent': + if server.provision_state is not "deleted": + cloud.purge_node_instance_info(uuid) + cloud.deactivate_node(uuid) + module.exit_json(changed=True, result="deleted") + else: + module.exit_json(changed=False, result="node not found") + except shade.OpenStackCloudException as e: + module.fail_json(msg=e.message) + + +# this is magic, see lib/ansible/module_common.py +from ansible.module_utils.basic import * +from ansible.module_utils.openstack import * +main() From f2c661982660b5bc71931104d53926d94f0bdc6b Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Tue, 31 Mar 2015 16:37:07 -0400 Subject: [PATCH 040/464] Add OpenStack Object module --- cloud/openstack/os_object.py | 125 +++++++++++++++++++++++++++++++++++ 1 file changed, 125 insertions(+) create mode 100644 cloud/openstack/os_object.py diff --git a/cloud/openstack/os_object.py b/cloud/openstack/os_object.py new file mode 100644 index 00000000000..ed58bb1e705 --- /dev/null +++ b/cloud/openstack/os_object.py @@ -0,0 +1,125 @@ +#!/usr/bin/python + +# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. +# Copyright (c) 2013, Benno Joy +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . + +try: + import shade + HAS_SHADE = True +except ImportError: + HAS_SHADE = False + + +DOCUMENTATION = ''' +--- +module: os_object +short_description: Create or Delete objects and containers from OpenStack +version_added: "1.10" +extends_documentation_fragment: openstack +description: + - Create or Delete objects and containers from OpenStack +options: + container: + description: + - The name of the container in which to create the object + required: true + name: + description: + - Name to be give to the object. If omitted, operations will be on + the entire container + required: false + file: + description: + - Path to local file to be uploaded. + required: false + container_access: + description: + - desired container access level. + required: false + choices: ['private', 'public'] + default: private + state: + description: + - Should the resource be present or absent. 
+ choices: [present, absent] + default: present +requirements: ["shade"] +''' + +EXAMPLES = ''' +# Creates a object named 'fstab' in the 'config' container +- os_object: cloud=mordred state=present name=fstab container=config file=/etc/fstab + +# Deletes a container called config and all of its contents +- os_object: cloud=rax-iad state=absent container=config +''' + + +def process_object( + cloud_obj, container, name, filename, container_access, **kwargs): + + changed = False + container_obj = cloud_obj.get_container(container) + if kwargs['state'] == 'present': + if not container_obj: + container_obj = cloud_obj.create_container(container) + changed = True + if cloud_obj.get_container_access(container) != container_access: + cloud_obj.set_container_access(container, container_access) + changed = True + if name: + if cloud_obj.is_object_stale(container, name, filename): + cloud_obj.create_object(container, name, filename) + changed = True + else: + if container_obj: + if name: + if cloud_obj.get_object_metadata(container, name): + cloud_obj.delete_object(container, name) + changed= True + else: + cloud_obj.delete_container(container) + changed= True + return changed + + +def main(): + argument_spec = openstack_full_argument_spec( + name=dict(required=False, default=None), + container=dict(required=True), + filename=dict(required=False, default=None), + container_access=dict(default='private', choices=['private', 'public']), + state=dict(default='present', choices=['absent', 'present']), + ) + module_kwargs = openstack_module_kwargs() + module = AnsibleModule(argument_spec, **module_kwargs) + + if not HAS_SHADE: + module.fail_json(msg='shade is required for this module') + + try: + cloud = shade.openstack_cloud(**module.params) + + changed = process_object(cloud, **module.params) + + module.exit_json(changed=changed, result="success") + except shade.OpenStackCloudException as e: + module.fail_json(msg=e.message) + +# this is magic, see lib/ansible/module_common.py 
+from ansible.module_utils.basic import * +from ansible.module_utils.openstack import * +main() From c5f5dfd8a0b73a384a6970113a1012a05bf5276d Mon Sep 17 00:00:00 2001 From: Dan Abel Date: Fri, 31 Oct 2014 14:46:32 +0000 Subject: [PATCH 041/464] use aws connect calls that allow boto profile use --- cloud/amazon/s3.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cloud/amazon/s3.py b/cloud/amazon/s3.py index e7d017f58ea..2ca36a85bdc 100644 --- a/cloud/amazon/s3.py +++ b/cloud/amazon/s3.py @@ -116,6 +116,7 @@ from boto.s3.connection import OrdinaryCallingFormat try: import boto + import boto.ec2 from boto.s3.connection import Location from boto.s3.connection import S3Connection except ImportError: From f38186ce8b49ea98e29241712da45917a3154e73 Mon Sep 17 00:00:00 2001 From: Andrew Davis Date: Fri, 3 Apr 2015 12:41:10 -0700 Subject: [PATCH 042/464] ansible_facts reflects 1.8 output --- cloud/docker/docker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 49e11f3caa0..6cb9802410e 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -1504,7 +1504,7 @@ def main(): summary=manager.counters, containers=containers.changed, reload_reasons=manager.get_reload_reason_message(), - ansible_facts=_ansible_facts(containers.changed)) + ansible_facts=_ansible_facts(manager.get_inspect_containers(containers.changed))) except DockerAPIError as e: module.fail_json(changed=manager.has_changed(), msg="Docker API Error: %s" % e.explanation) From fb7ff9271aaf29343d10c0e50a4017ab07677185 Mon Sep 17 00:00:00 2001 From: Pepe Barbe Date: Mon, 6 Apr 2015 23:25:29 -0500 Subject: [PATCH 043/464] Add OS architecture and OS name to the Windows setup module --- windows/setup.ps1 | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/windows/setup.ps1 b/windows/setup.ps1 index c249251d974..32b4d865263 100644 --- a/windows/setup.ps1 +++ b/windows/setup.ps1 @@ -25,6 +25,7 @@ $result = New-Object psobject @{ changed 
= $false }; +$win32_os = Get-WmiObject Win32_OperatingSystem $osversion = [Environment]::OSVersion $memory = @() $memory += Get-WmiObject win32_Physicalmemory @@ -53,10 +54,13 @@ foreach ($adapter in $ActiveNetcfg) Set-Attr $result.ansible_facts "ansible_interfaces" $formattednetcfg +Set-Attr $result.ansible_facts "ansible_architecture" $win32_os.OSArchitecture + Set-Attr $result.ansible_facts "ansible_hostname" $env:COMPUTERNAME; Set-Attr $result.ansible_facts "ansible_fqdn" "$([System.Net.Dns]::GetHostByName((hostname)).HostName)" Set-Attr $result.ansible_facts "ansible_system" $osversion.Platform.ToString() Set-Attr $result.ansible_facts "ansible_os_family" "Windows" +Set-Attr $result.ansible_facts "ansible_os_name" $win32_os.Name.Split('|')[0] Set-Attr $result.ansible_facts "ansible_distribution" $osversion.VersionString Set-Attr $result.ansible_facts "ansible_distribution_version" $osversion.Version.ToString() From b67e1a1ce928294610b2739d2f3543ee4c484260 Mon Sep 17 00:00:00 2001 From: James Martin Date: Fri, 27 Mar 2015 21:41:53 -0400 Subject: [PATCH 044/464] Streamlined rolling udpate algorithm. Still need to account for partials, and not waiting for instances if we're mass terminating them. 
--- cloud/amazon/ec2_asg.py | 288 +++++++++++++++++++++++++++++++--------- 1 file changed, 227 insertions(+), 61 deletions(-) diff --git a/cloud/amazon/ec2_asg.py b/cloud/amazon/ec2_asg.py index e040ad48067..f95f7b79221 100644 --- a/cloud/amazon/ec2_asg.py +++ b/cloud/amazon/ec2_asg.py @@ -191,9 +191,13 @@ to "replace_instances": import sys import time +import logging as log from ansible.module_utils.basic import * from ansible.module_utils.ec2 import * +log.getLogger('boto').setLevel(log.CRITICAL) +#log.basicConfig(filename='/tmp/ansible_ec2_asg.log',level=log.DEBUG, format='%(asctime)s: %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p') + try: import boto.ec2.autoscale @@ -266,8 +270,71 @@ def get_properties(autoscaling_group): if getattr(autoscaling_group, "tags", None): properties['tags'] = dict((t.key, t.value) for t in autoscaling_group.tags) + return properties +def elb_dreg(asg_connection, module, group_name, instance_id): + region, ec2_url, aws_connect_params = get_aws_connection_info(module) + as_group = asg_connection.get_all_groups(names=[group_name])[0] + wait_timeout = module.params.get('wait_timeout') + props = get_properties(as_group) + count = 1 + if as_group.load_balancers and as_group.health_check_type == 'ELB': + try: + elb_connection = connect_to_aws(boto.ec2.elb, region, **aws_connect_params) + except boto.exception.NoAuthHandlerFound, e: + module.fail_json(msg=str(e)) + else: + return + + exists = True + for lb in as_group.load_balancers: + elb_connection.deregister_instances(lb, instance_id) + log.debug("De-registering {0} from ELB {1}".format(instance_id, lb)) + + wait_timeout = time.time() + wait_timeout + while wait_timeout > time.time() and count > 0: + count = 0 + for lb in as_group.load_balancers: + lb_instances = elb_connection.describe_instance_health(lb) + for i in lb_instances: + if i.instance_id == instance_id and i.state == "InService": + count += 1 + log.debug("{0}: {1}, {2}".format(i.instance_id, i.state, i.description)) + 
time.sleep(10) + + if wait_timeout <= time.time(): + # waiting took too long + module.fail_json(msg = "Waited too long for instance to deregister. {0}".format(time.asctime())) + + + + +def elb_healthy(asg_connection, elb_connection, module, group_name): + healthy_instances = [] + as_group = asg_connection.get_all_groups(names=[group_name])[0] + props = get_properties(as_group) + # get healthy, inservice instances from ASG + instances = [] + for instance, settings in props['instance_facts'].items(): + if settings['lifecycle_state'] == 'InService' and settings['health_status'] == 'Healthy': + instances.append(instance) + log.debug("ASG considers the following instances InService and Healthy: {0}".format(instances)) + log.debug("ELB instance status:") + for lb in as_group.load_balancers: + # we catch a race condition that sometimes happens if the instance exists in the ASG + # but has not yet show up in the ELB + try: + lb_instances = elb_connection.describe_instance_health(lb, instances=instances) + except boto.exception.InvalidInstance, e: + pass + for i in lb_instances: + if i.state == "InService": + healthy_instances.append(i.instance_id) + log.debug("{0}: {1}".format(i.instance_id, i.state)) + return len(healthy_instances) + + def wait_for_elb(asg_connection, module, group_name): region, ec2_url, aws_connect_params = get_aws_connection_info(module) @@ -278,36 +345,23 @@ def wait_for_elb(asg_connection, module, group_name): as_group = asg_connection.get_all_groups(names=[group_name])[0] if as_group.load_balancers and as_group.health_check_type == 'ELB': + log.debug("Waiting for ELB to consider intances healthy.") try: elb_connection = connect_to_aws(boto.ec2.elb, region, **aws_connect_params) except boto.exception.NoAuthHandlerFound, e: module.fail_json(msg=str(e)) wait_timeout = time.time() + wait_timeout - healthy_instances = {} + healthy_instances = elb_healthy(asg_connection, elb_connection, module, group_name) - while len(healthy_instances.keys()) < 
as_group.min_size and wait_timeout > time.time(): - as_group = asg_connection.get_all_groups(names=[group_name])[0] - props = get_properties(as_group) - # get healthy, inservice instances from ASG - instances = [] - for instance, settings in props['instance_facts'].items(): - if settings['lifecycle_state'] == 'InService' and settings['health_status'] == 'Healthy': - instances.append(instance) - for lb in as_group.load_balancers: - # we catch a race condition that sometimes happens if the instance exists in the ASG - # but has not yet show up in the ELB - try: - lb_instances = elb_connection.describe_instance_health(lb, instances=instances) - except boto.exception.InvalidInstance, e: - pass - for i in lb_instances: - if i.state == "InService": - healthy_instances[i.instance_id] = i.state + while healthy_instances < as_group.min_size and wait_timeout > time.time(): + healthy_instances = elb_healthy(asg_connection, elb_connection, module, group_name) + log.debug("ELB thinks {0} instances are healthy.".format(healthy_instances)) time.sleep(10) if wait_timeout <= time.time(): # waiting took too long module.fail_json(msg = "Waited too long for ELB instances to be healthy. %s" % time.asctime()) + log.debug("Waiting complete. 
ELB thinks {0} instances are healthy.".format(healthy_instances)) def create_autoscaling_group(connection, module): group_name = module.params.get('name') @@ -365,7 +419,7 @@ def create_autoscaling_group(connection, module): try: connection.create_auto_scaling_group(ag) if wait_for_instances == True: - wait_for_new_instances(module, connection, group_name, wait_timeout, desired_capacity, 'viable_instances') + wait_for_new_inst(module, connection, group_name, wait_timeout, desired_capacity, 'viable_instances') wait_for_elb(connection, module, group_name) as_group = connection.get_all_groups(names=[group_name])[0] asg_properties = get_properties(as_group) @@ -431,7 +485,7 @@ def create_autoscaling_group(connection, module): module.fail_json(msg=str(e)) if wait_for_instances == True: - wait_for_new_instances(module, connection, group_name, wait_timeout, desired_capacity, 'viable_instances') + wait_for_new_inst(module, connection, group_name, wait_timeout, desired_capacity, 'viable_instances') wait_for_elb(connection, module, group_name) try: as_group = connection.get_all_groups(names=[group_name])[0] @@ -472,6 +526,15 @@ def get_chunks(l, n): for i in xrange(0, len(l), n): yield l[i:i+n] +def update_size(group, max_size, min_size, dc): + + log.debug("setting ASG sizes") + log.debug("minimum size: {0}, desired_capacity: {1}, max size: {2}".format(min_size, dc, max_size )) + group.max_size = max_size + group.min_size = min_size + group.desired_capacity = dc + group.update() + def replace(connection, module): batch_size = module.params.get('replace_batch_size') wait_timeout = module.params.get('wait_timeout') @@ -479,91 +542,191 @@ def replace(connection, module): max_size = module.params.get('max_size') min_size = module.params.get('min_size') desired_capacity = module.params.get('desired_capacity') - - # FIXME: we need some more docs about this feature + lc_check = module.params.get('lc_check') replace_instances = module.params.get('replace_instances') as_group = 
connection.get_all_groups(names=[group_name])[0] - wait_for_new_instances(module, connection, group_name, wait_timeout, as_group.min_size, 'viable_instances') + wait_for_new_inst(module, connection, group_name, wait_timeout, as_group.min_size, 'viable_instances') props = get_properties(as_group) instances = props['instances'] - replaceable = 0 if replace_instances: instances = replace_instances - for k in props['instance_facts'].keys(): - if k in instances: - if props['instance_facts'][k]['launch_config_name'] != props['launch_config_name']: - replaceable += 1 - if replaceable == 0: + # check to see if instances are replaceable if checking launch configs + + new_instances, old_instances = get_instances_by_lc(props, lc_check, instances) + num_new_inst_needed = desired_capacity - len(new_instances) + + if lc_check: + if num_new_inst_needed == 0 and old_instances: + log.debug("No new instances needed, but old instances are present. Removing old instances") + terminate_batch(connection, module, old_instances, instances, True) + as_group = connection.get_all_groups(names=[group_name])[0] + props = get_properties(as_group) + changed = True + return(changed, props) + + # we don't want to spin up extra instances if not necessary + if num_new_inst_needed < batch_size: + log.debug("Overriding batch size to {0}".format(num_new_inst_needed)) + batch_size = num_new_inst_needed + + if not old_instances: changed = False return(changed, props) # set temporary settings and wait for them to be reached + # This should get overriden if the number of instances left is less than the batch size. 
+ as_group = connection.get_all_groups(names=[group_name])[0] - as_group.max_size = max_size + batch_size - as_group.min_size = min_size + batch_size - as_group.desired_capacity = desired_capacity + batch_size - as_group.update() - wait_for_new_instances(module, connection, group_name, wait_timeout, as_group.min_size, 'viable_instances') + update_size(as_group, max_size + batch_size, min_size + batch_size, desired_capacity + batch_size) + wait_for_new_inst(module, connection, group_name, wait_timeout, as_group.min_size, 'viable_instances') wait_for_elb(connection, module, group_name) as_group = connection.get_all_groups(names=[group_name])[0] props = get_properties(as_group) instances = props['instances'] if replace_instances: instances = replace_instances + log.debug("beginning main loop") for i in get_chunks(instances, batch_size): - terminate_batch(connection, module, i) - wait_for_new_instances(module, connection, group_name, wait_timeout, as_group.min_size, 'viable_instances') + # break out of this loop if we have enough new instances + break_early, desired_size, term_instances = terminate_batch(connection, module, i, instances, False) + wait_for_term_inst(connection, module, term_instances) + wait_for_new_inst(module, connection, group_name, wait_timeout, desired_size, 'viable_instances') wait_for_elb(connection, module, group_name) as_group = connection.get_all_groups(names=[group_name])[0] - # return settings to normal - as_group.max_size = max_size - as_group.min_size = min_size - as_group.desired_capacity = desired_capacity - as_group.update() + if break_early: + log.debug("breaking loop") + break + update_size(as_group, max_size, min_size, desired_capacity) as_group = connection.get_all_groups(names=[group_name])[0] asg_properties = get_properties(as_group) + log.debug("Rolling update complete.") changed=True return(changed, asg_properties) -def terminate_batch(connection, module, replace_instances): - group_name = module.params.get('name') - 
wait_timeout = int(module.params.get('wait_timeout')) - lc_check = module.params.get('lc_check') - - as_group = connection.get_all_groups(names=[group_name])[0] - props = get_properties(as_group) +def get_instances_by_lc(props, lc_check, initial_instances): - # check to make sure instances given are actually in the given ASG - # and they have a non-current launch config + new_instances = [] old_instances = [] + # old instances are those that have the old launch config + if lc_check: + for i in props['instances']: + if props['instance_facts'][i]['launch_config_name'] == props['launch_config_name']: + new_instances.append(i) + else: + old_instances.append(i) + + else: + log.debug("Comparing initial instances with current: {0}".format(initial_instances)) + for i in props['instances']: + if i not in initial_instances: + new_instances.append(i) + else: + old_instances.append(i) + log.debug("New instances: {0}, {1}".format(len(new_instances), new_instances)) + log.debug("Old instances: {0}, {1}".format(len(old_instances), old_instances)) + + return new_instances, old_instances + + +def list_purgeable_instances(props, lc_check, replace_instances, initial_instances): + instances_to_terminate = [] instances = ( inst_id for inst_id in replace_instances if inst_id in props['instances']) + # check to make sure instances given are actually in the given ASG + # and they have a non-current launch config if lc_check: for i in instances: if props['instance_facts'][i]['launch_config_name'] != props['launch_config_name']: - old_instances.append(i) + instances_to_terminate.append(i) else: - old_instances = instances + for i in instances: + if i in initial_instances: + instances_to_terminate.append(i) + return instances_to_terminate - # set all instances given to unhealthy - for instance_id in old_instances: - connection.set_instance_health(instance_id,'Unhealthy') +def terminate_batch(connection, module, replace_instances, initial_instances, leftovers=False): + batch_size = 
module.params.get('replace_batch_size') + min_size = module.params.get('min_size') + desired_capacity = module.params.get('desired_capacity') + group_name = module.params.get('name') + wait_timeout = int(module.params.get('wait_timeout')) + lc_check = module.params.get('lc_check') + decrement_capacity = False + break_loop = False + + as_group = connection.get_all_groups(names=[group_name])[0] + props = get_properties(as_group) + desired_size = as_group.min_size + + new_instances, old_instances = get_instances_by_lc(props, lc_check, initial_instances) + num_new_inst_needed = desired_capacity - len(new_instances) + + # check to make sure instances given are actually in the given ASG + # and they have a non-current launch config + instances_to_terminate = list_purgeable_instances(props, lc_check, replace_instances, initial_instances) + + log.debug("new instances needed: {0}".format(num_new_inst_needed)) + log.debug("new instances: {0}".format(new_instances)) + log.debug("old instances: {0}".format(old_instances)) + log.debug("batch instances: {0}".format(",".join(instances_to_terminate))) + + if num_new_inst_needed == 0: + decrement_capacity = True + if as_group.min_size != min_size: + as_group.min_size = min_size + as_group.update() + log.debug("Updating minimum size back to original of {0}".format(min_size)) + #if are some leftover old instances, but we are already at capacity with new ones + # we don't want to decrement capacity + if leftovers: + decrement_capacity = False + break_loop = True + instances_to_terminate = old_instances + desired_size = min_size + log.debug("No new instances needed") + + if num_new_inst_needed < batch_size and num_new_inst_needed !=0 : + instances_to_terminate = instances_to_terminate[:num_new_inst_needed] + decrement_capacity = False + break_loop = False + log.debug("{0} new instances needed".format(num_new_inst_needed)) + + log.debug("decrementing capacity: {0}".format(decrement_capacity)) + + for instance_id in 
instances_to_terminate:
+        elb_dreg(connection, module, group_name, instance_id)
+        log.debug("terminating instance: {0}".format(instance_id))
+        connection.terminate_instance(instance_id, decrement_capacity=decrement_capacity)
 
     # we wait to make sure the machines we marked as Unhealthy are
     # no longer in the list
+    return break_loop, desired_size, instances_to_terminate
+
+
+def wait_for_term_inst(connection, module, term_instances):
+
+    batch_size = module.params.get('replace_batch_size')
+    wait_timeout = module.params.get('wait_timeout')
+    group_name = module.params.get('name')
+    lc_check = module.params.get('lc_check')
+    as_group = connection.get_all_groups(names=[group_name])[0]
+    props = get_properties(as_group)
 
     count = 1
     wait_timeout = time.time() + wait_timeout
     while wait_timeout > time.time() and count > 0:
+        log.debug("waiting for instances to terminate")
         count = 0
         as_group = connection.get_all_groups(names=[group_name])[0]
         props = get_properties(as_group)
         instance_facts = props['instance_facts']
-        instances = ( i for i in instance_facts if i in old_instances)
+        instances = ( i for i in instance_facts if i in term_instances)
         for i in instances:
-            if ( instance_facts[i]['lifecycle_state'] == 'Terminating'
-                or instance_facts[i]['health_status'] == 'Unhealthy' ):
+            lifecycle = instance_facts[i]['lifecycle_state']
+            health = instance_facts[i]['health_status']
+            log.debug("Instance {0} has state of {1},{2}".format(i,lifecycle,health ))
+            if lifecycle == 'Terminating' or health == 'Unhealthy':
                 count += 1
         time.sleep(10)
 
@@ -571,21 +734,24 @@ def terminate_batch(connection, module, replace_instances):
         # waiting took too long
         module.fail_json(msg = "Waited too long for old instances to terminate. %s" % time.asctime())
 
-def wait_for_new_instances(module, connection, group_name, wait_timeout, desired_size, prop):
+
+def wait_for_new_inst(module, connection, group_name, wait_timeout, desired_size, prop):
 
     # make sure we have the latest stats after that last loop.
as_group = connection.get_all_groups(names=[group_name])[0] props = get_properties(as_group) + log.debug("Waiting for {0} = {1}, currently {2}".format(prop, desired_size, props[prop])) # now we make sure that we have enough instances in a viable state wait_timeout = time.time() + wait_timeout while wait_timeout > time.time() and desired_size > props[prop]: + log.debug("Waiting for {0} = {1}, currently {2}".format(prop, desired_size, props[prop])) time.sleep(10) as_group = connection.get_all_groups(names=[group_name])[0] props = get_properties(as_group) if wait_timeout <= time.time(): # waiting took too long module.fail_json(msg = "Waited too long for new instances to become viable. %s" % time.asctime()) - + log.debug("Reached {0}: {1}".format(prop, desired_size)) return props def main(): From e9a9d28b6dd9d6e90a96b285d5e17c32e3d6c8b3 Mon Sep 17 00:00:00 2001 From: Juho-Mikko Pellinen Date: Wed, 15 Apr 2015 13:43:00 +0300 Subject: [PATCH 045/464] Add support for specifying unique hosted zone identifier --- cloud/amazon/route53.py | 30 +++++++++++++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) diff --git a/cloud/amazon/route53.py b/cloud/amazon/route53.py index f89fca448b7..75d3a66ae6b 100644 --- a/cloud/amazon/route53.py +++ b/cloud/amazon/route53.py @@ -35,6 +35,12 @@ options: required: true default: null aliases: [] + hosted_zone_id: + description: + - The Hosted Zone ID of the DNS zone to modify + required: false + default: null + aliases: [] record: description: - The full DNS record to create or delete @@ -156,6 +162,16 @@ EXAMPLES = ''' alias=True alias_hosted_zone_id="{{ elb_zone_id }}" +# Add an AAAA record with Hosted Zone ID. 
Note that because there are colons in the value
+# that the entire parameter list must be quoted:
+- route53:
+      command: "create"
+      zone: "foo.com"
+      hosted_zone_id: "Z2AABBCCDDEEFF"
+      record: "localhost.foo.com"
+      type: "AAAA"
+      ttl: "7200"
+      value: "::1"
 '''
 
 
@@ -191,6 +207,7 @@ def main():
     argument_spec.update(dict(
             command = dict(choices=['get', 'create', 'delete'], required=True),
             zone = dict(required=True),
+            hosted_zone_id = dict(required=False),
             record = dict(required=True),
             ttl = dict(required=False, default=3600),
             type = dict(choices=['A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'NS'], required=True),
@@ -209,6 +226,7 @@ def main():
 
     command_in = module.params.get('command')
     zone_in = module.params.get('zone').lower()
+    hosted_zone_id_in = module.params.get('hosted_zone_id')
     ttl_in = module.params.get('ttl')
     record_in = module.params.get('record').lower()
     type_in = module.params.get('type')
@@ -257,9 +275,17 @@ def main():
         # the private_zone_in boolean specified in the params
         if module.boolean(r53zone['Config'].get('PrivateZone', False)) == private_zone_in:
             zone_id = r53zone['Id'].replace('/hostedzone/', '')
-            zones[r53zone['Name']] = zone_id
+            # only save when unique hosted_zone_id is given and is equal
+            # hosted_zone_id_in is specified in the params
+            if hosted_zone_id_in and zone_id == hosted_zone_id_in:
+                zones[r53zone['Name']] = zone_id
+            elif not hosted_zone_id_in:
+                zones[r53zone['Name']] = zone_id
 
     # Verify that the requested zone is already defined in Route53
+    if not zone_in in zones and hosted_zone_id_in:
+        errmsg = "Hosted_zone_id %s does not exist in Route53" % hosted_zone_id_in
+        module.fail_json(msg = errmsg)
     if not zone_in in zones:
         errmsg = "Zone %s does not exist in Route53" % zone_in
         module.fail_json(msg = errmsg)
@@ -282,6 +308,8 @@ def main():
             record['ttl'] = rset.ttl
             record['value'] = ','.join(sorted(rset.resource_records))
             record['values'] = sorted(rset.resource_records)
+            if hosted_zone_id_in:
+                record['hosted_zone_id'] = 
hosted_zone_id_in if rset.alias_dns_name: record['alias'] = True record['value'] = rset.alias_dns_name From 85cff6699e0149bc129e4b384dd9af1efa00a3c1 Mon Sep 17 00:00:00 2001 From: Chris AtLee Date: Thu, 16 Apr 2015 17:06:19 -0400 Subject: [PATCH 046/464] Add support for 'update' parameter to hg module --- source_control/hg.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/source_control/hg.py b/source_control/hg.py index d83215fabe1..29086fb9aa5 100644 --- a/source_control/hg.py +++ b/source_control/hg.py @@ -65,6 +65,13 @@ options: required: false default: "no" choices: [ "yes", "no" ] + update: + required: false + default: "yes" + choices: [ "yes", "no" ] + version_added: "2.0" + description: + - If C(no), do not retrieve new revisions from the origin repository executable: required: false default: null @@ -210,6 +217,7 @@ def main(): revision = dict(default=None, aliases=['version']), force = dict(default='no', type='bool'), purge = dict(default='no', type='bool'), + update = dict(default='yes', type='bool'), executable = dict(default=None), ), ) @@ -218,6 +226,7 @@ def main(): revision = module.params['revision'] force = module.params['force'] purge = module.params['purge'] + update = module.params['update'] hg_path = module.params['executable'] or module.get_bin_path('hg', True) hgrc = os.path.join(dest, '.hg/hgrc') @@ -234,6 +243,9 @@ def main(): (rc, out, err) = hg.clone() if rc != 0: module.fail_json(msg=err) + elif not update: + # Just return having found a repo already in the dest path + before = hg.get_revision() elif hg.at_revision: # no update needed, don't pull before = hg.get_revision() From 3c605d4aba68e0170d6779117e1d326790291059 Mon Sep 17 00:00:00 2001 From: Petros Moisiadis Date: Fri, 17 Apr 2015 19:43:25 +0300 Subject: [PATCH 047/464] make migrate command idempotent with django built-in migrations Django since version 1.7 has built-in migrations, so no need to have south installed with recent django versions. 
The 'migrate' command works with built-in migrations without any change, but the output is different than the output produced by south, which breaks idempotence. This commit fixes this. --- web_infrastructure/django_manage.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/web_infrastructure/django_manage.py b/web_infrastructure/django_manage.py index 46ebb2fb8f1..13207f955c2 100644 --- a/web_infrastructure/django_manage.py +++ b/web_infrastructure/django_manage.py @@ -89,7 +89,7 @@ notes: - I(virtualenv) (U(http://www.virtualenv.org)) must be installed on the remote host if the virtualenv parameter is specified. - This module will create a virtualenv if the virtualenv parameter is specified and a virtualenv does not already exist at the given location. - This module assumes English error messages for the 'createcachetable' command to detect table existence, unfortunately. - - To be able to use the migrate command, you must have south installed and added as an app in your settings + - To be able to use the migrate command with django versions < 1.7, you must have south installed and added as an app in your settings - To be able to use the collectstatic command, you must have enabled staticfiles in your settings requirements: [ "virtualenv", "django" ] author: Scott Anderson @@ -159,7 +159,7 @@ def syncdb_filter_output(line): return ("Creating table " in line) or ("Installed" in line and "Installed 0 object" not in line) def migrate_filter_output(line): - return ("Migrating forwards " in line) or ("Installed" in line and "Installed 0 object" not in line) + return ("Migrating forwards " in line) or ("Installed" in line and "Installed 0 object" not in line) or ("Applying" in line) def main(): command_allowed_param_map = dict( From 3b954edab2bf54c710b86d95482548b893d648fa Mon Sep 17 00:00:00 2001 From: Lucas David Traverso Date: Sun, 19 Apr 2015 04:39:59 -0300 Subject: [PATCH 048/464] django_manage: Use shebang in manage.py instead of hardcode 
python --- web_infrastructure/django_manage.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web_infrastructure/django_manage.py b/web_infrastructure/django_manage.py index 46ebb2fb8f1..c721456715f 100644 --- a/web_infrastructure/django_manage.py +++ b/web_infrastructure/django_manage.py @@ -234,7 +234,7 @@ def main(): _ensure_virtualenv(module) - cmd = "python manage.py %s" % (command, ) + cmd = "./manage.py %s" % (command, ) if command in noinput_commands: cmd = '%s --noinput' % cmd From 1f37f2a1523e2a69685220906f80bad3f8cbd760 Mon Sep 17 00:00:00 2001 From: "Hennadiy (Gena) Verkh" Date: Tue, 21 Apr 2015 11:43:09 +0200 Subject: [PATCH 049/464] Removed method restriction in uri module --- network/basics/uri.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/network/basics/uri.py b/network/basics/uri.py index 24f0dbf9e1f..7be1cc92159 100644 --- a/network/basics/uri.py +++ b/network/basics/uri.py @@ -73,7 +73,6 @@ options: description: - The HTTP method of the request or response. 
required: false - choices: [ "GET", "POST", "PUT", "HEAD", "DELETE", "OPTIONS", "PATCH" ] default: "GET" return_content: description: @@ -341,7 +340,7 @@ def main(): password = dict(required=False, default=None), body = dict(required=False, default=None), body_format = dict(required=False, default='raw', choices=['raw', 'json']), - method = dict(required=False, default='GET', choices=['GET', 'POST', 'PUT', 'HEAD', 'DELETE', 'OPTIONS', 'PATCH']), + method = dict(required=False, default='GET'), return_content = dict(required=False, default='no', type='bool'), force_basic_auth = dict(required=False, default='no', type='bool'), follow_redirects = dict(required=False, default='safe', choices=['all', 'safe', 'none', 'yes', 'no']), From 501a665060ab292f6681a829bc4da2cc27a41a5b Mon Sep 17 00:00:00 2001 From: Ian Babrou Date: Sun, 22 Mar 2015 14:16:02 +0300 Subject: [PATCH 050/464] Pulling missing docker image before doing anything --- cloud/docker/docker.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 158b8c8135d..48ab6449f53 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -1460,10 +1460,14 @@ def main(): if count > 1 and name: module.fail_json(msg="Count and name must not be used together") - # Explicitly pull new container images, if requested. - # Do this before noticing running and deployed containers so that the image names will differ - # if a newer image has been pulled. - if pull == "always": + # Explicitly pull new container images, if requested. Do this before + # noticing running and deployed containers so that the image names + # will differ if a newer image has been pulled. + # Missing images should be pulled first to avoid downtime when old + # container is stopped, but image for new one is now downloaded yet. + # It also prevents removal of running container before realizing + # that requested image cannot be retrieved. 
+ if pull == "always" or (state == 'reloaded' and manager.get_inspect_image() is None): manager.pull_image() containers = ContainerSet(manager) From fff29f049e1f7b2103f4527ae440c92950ade6b0 Mon Sep 17 00:00:00 2001 From: Arata Notsu Date: Fri, 8 May 2015 01:40:10 +0900 Subject: [PATCH 051/464] Not use "is" to compare strings As "is" tests whether if operands are the same object rather than they have the same value, potentially causes a wrong result. --- system/service.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/system/service.py b/system/service.py index 3589340f152..5627f128c92 100644 --- a/system/service.py +++ b/system/service.py @@ -862,7 +862,7 @@ class LinuxService(Service): if self.svc_cmd and self.svc_cmd.endswith('rc-service') and self.action == 'start' and self.crashed: self.execute_command("%s zap" % svc_cmd, daemonize=True) - if self.action is not "restart": + if self.action != "restart": if svc_cmd != '': # upstart or systemd or OpenRC rc_state, stdout, stderr = self.execute_command("%s %s %s" % (svc_cmd, self.action, arguments), daemonize=True) @@ -970,11 +970,11 @@ class FreeBsdService(Service): def service_control(self): - if self.action is "start": + if self.action == "start": self.action = "onestart" - if self.action is "stop": + if self.action == "stop": self.action = "onestop" - if self.action is "reload": + if self.action == "reload": self.action = "onereload" return self.execute_command("%s %s %s %s" % (self.svc_cmd, self.name, self.action, self.arguments)) @@ -1180,9 +1180,9 @@ class NetBsdService(Service): self.running = True def service_control(self): - if self.action is "start": + if self.action == "start": self.action = "onestart" - if self.action is "stop": + if self.action == "stop": self.action = "onestop" self.svc_cmd = "%s" % self.svc_initscript From c3cb39dfa02d587315217a24fbf909d8b2ddca71 Mon Sep 17 00:00:00 2001 From: 0tmc Date: Fri, 8 May 2015 12:22:37 +0300 Subject: [PATCH 052/464] Use of 
proper fstab file on FreeBSD --- system/mount.py | 22 +++++++++++++++++++--- 1 file changed, 19 insertions(+), 3 deletions(-) diff --git a/system/mount.py b/system/mount.py index e11d497220b..eed72d2d7c9 100644 --- a/system/mount.py +++ b/system/mount.py @@ -206,13 +206,29 @@ def unset_mount(**kwargs): def mount(module, **kwargs): """ mount up a path or remount if needed """ + + # kwargs: name, src, fstype, opts, dump, passno, state, fstab=/etc/fstab + args = dict( + opts = 'default', + dump = '0', + passno = '0', + fstab = '/etc/fstab' + ) + args.update(kwargs) + mount_bin = module.get_bin_path('mount') name = kwargs['name'] + + cmd = [ mount_bin, ] + if os.path.ismount(name): - cmd = [ mount_bin , '-o', 'remount', name ] - else: - cmd = [ mount_bin, name ] + cmd += [ '-o', 'remount', ] + + if get_platform().lower() == 'freebsd': + cmd += [ '-F', args['fstab'], ] + + cmd += [ name, ] rc, out, err = module.run_command(cmd) if rc == 0: From 8db564dcf79fda671f69ca702af9d92a855ec491 Mon Sep 17 00:00:00 2001 From: Jamie Hannaford Date: Fri, 8 May 2015 14:05:16 +0200 Subject: [PATCH 053/464] Fix attribute name for Auto Scale servers --- cloud/rackspace/rax_scaling_group.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/rackspace/rax_scaling_group.py b/cloud/rackspace/rax_scaling_group.py index 64783397016..e3c6ec8944f 100644 --- a/cloud/rackspace/rax_scaling_group.py +++ b/cloud/rackspace/rax_scaling_group.py @@ -262,8 +262,8 @@ def rax_asg(module, cooldown=300, disk_config=None, files={}, flavor=None, # Launch Configuration Updates lc = sg.get_launch_config() lc_args = {} - if server_name != lc.get('name'): - lc_args['name'] = server_name + if server_name != lc.get('server_name'): + lc_args['server_name'] = server_name if image != lc.get('image'): lc_args['image'] = image From f620a0ac41c82edc1e894734f4e0c4dbf63dfd3f Mon Sep 17 00:00:00 2001 From: HelenaTian Date: Tue, 12 May 2015 20:53:36 -0700 Subject: [PATCH 054/464] Update gce.py to 
correctly handle propagated metadata type from a mother template My project is using Ansible to automate cloud build process. Ansible has a core module gce.py for managing GCE instances. We've come across a use case that's not yet supported - when executing ansible-playbook, if a child template is included, then metadata which is defined in and propagated from the mother template is treated as string type and not parsed correctly(which instead is dictionary type), and triggers release flow failure. We currently put some fix by explicitly casting metadata to string type in our own branch, but would like to contribute the fix to Ansible so that everyone onboarding GCE and using Ansible for release management could benefit from it, or hear owner's opinion on fixing the issue if there's a better fix in owner's mind:) --- cloud/google/gce.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/google/gce.py b/cloud/google/gce.py index 314f1200161..be9e6818c76 100644 --- a/cloud/google/gce.py +++ b/cloud/google/gce.py @@ -317,7 +317,7 @@ def create_instances(module, gce, instance_names): # [ {'key': key1, 'value': value1}, {'key': key2, 'value': value2}, ...] if metadata: try: - md = literal_eval(metadata) + md = literal_eval(str(metadata)) if not isinstance(md, dict): raise ValueError('metadata must be a dict') except ValueError, e: From b307f7bcba19354aac041cd22bbd30cc27dce653 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 14 May 2015 14:19:15 -0700 Subject: [PATCH 055/464] Fix problem writing binary content to a temporary file in the uri module. 
Fixes https://github.com/ansible/ansible/issues/10938 Fixes https://github.com/ansible/ansible/issues/7606 --- network/basics/uri.py | 29 ++++++++++++++++++----------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/network/basics/uri.py b/network/basics/uri.py index ce2cc888779..a3f77919c0f 100644 --- a/network/basics/uri.py +++ b/network/basics/uri.py @@ -20,6 +20,7 @@ # # see examples/playbooks/uri.yml +import cgi import shutil import tempfile import base64 @@ -188,7 +189,6 @@ try: except ImportError: HAS_URLPARSE = False - def write_file(module, url, dest, content): # create a tempfile with some test content fd, tmpsrc = tempfile.mkstemp() @@ -309,10 +309,7 @@ def uri(module, url, dest, user, password, body, body_format, method, headers, r r['redirected'] = redirected r.update(resp_redir) r.update(resp) - try: - return r, unicode(content.decode('raw_unicode_escape')), dest - except: - return r, content, dest + return r, content, dest except httplib2.RedirectMissingLocation: module.fail_json(msg="A 3xx redirect response code was provided but no Location: header was provided to point to the new location.") except httplib2.RedirectLimit: @@ -440,22 +437,32 @@ def main(): ukey = key.replace("-", "_") uresp[ukey] = value + # Default content_encoding to try + content_encoding = 'utf-8' if 'content_type' in uresp: - if uresp['content_type'].startswith('application/json') or \ - uresp['content_type'].startswith('text/json'): + content_type, params = cgi.parse_header(uresp['content_type']) + if 'charset' in params: + content_encoding = params['charset'] + u_content = unicode(content, content_encoding, errors='xmlcharrefreplace') + if content_type.startswith('application/json') or \ + content_type.startswith('text/json'): try: - js = json.loads(content) + js = json.loads(u_content) uresp['json'] = js except: pass + else: + u_content = unicode(content, content_encoding, errors='xmlcharrefreplace') + if resp['status'] not in status_code: - 
module.fail_json(msg="Status code was not " + str(status_code), content=content, **uresp) + module.fail_json(msg="Status code was not " + str(status_code), content=u_content, **uresp) elif return_content: - module.exit_json(changed=changed, content=content, **uresp) + module.exit_json(changed=changed, content=u_content, **uresp) else: module.exit_json(changed=changed, **uresp) # import module snippets from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() From a0b57f3aab09c16b7b16a2e908c4067bfb194e8e Mon Sep 17 00:00:00 2001 From: Vasyl Kaigorodov Date: Fri, 15 May 2015 15:28:28 +0200 Subject: [PATCH 056/464] GCE module: add posibility to specify Service Account permissions during instance creation --- cloud/google/gce.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/cloud/google/gce.py b/cloud/google/gce.py index 314f1200161..20ceb257b3a 100644 --- a/cloud/google/gce.py +++ b/cloud/google/gce.py @@ -287,6 +287,8 @@ def create_instances(module, gce, instance_names): ip_forward = module.params.get('ip_forward') external_ip = module.params.get('external_ip') disk_auto_delete = module.params.get('disk_auto_delete') + service_account_permissions = module.params.get('service_account_permissions') + service_account_email = module.params.get('service_account_email') if external_ip == "none": external_ip = None @@ -330,6 +332,14 @@ def create_instances(module, gce, instance_names): items.append({"key": k,"value": v}) metadata = {'items': items} + ex_sa_perms = [] + if service_account_permissions: + if service_account_email: + ex_sa_perms.append({'email': service_account_email}) + else: + ex_sa_perms.append({'email': "default"}) + ex_sa_perms[0]['scopes'] = service_account_permissions + # These variables all have default values but check just in case if not lc_image or not lc_network or not lc_machine_type or not lc_zone: module.fail_json(msg='Missing required create instance variable', @@ -349,7 +359,7 @@ def 
create_instances(module, gce, instance_names): inst = gce.create_node(name, lc_machine_type, lc_image, location=lc_zone, ex_network=network, ex_tags=tags, ex_metadata=metadata, ex_boot_disk=pd, ex_can_ip_forward=ip_forward, - external_ip=external_ip, ex_disk_auto_delete=disk_auto_delete) + external_ip=external_ip, ex_disk_auto_delete=disk_auto_delete, ex_service_accounts=ex_sa_perms) changed = True except ResourceExistsError: inst = gce.ex_get_node(name, lc_zone) @@ -437,6 +447,7 @@ def main(): tags = dict(type='list'), zone = dict(default='us-central1-a'), service_account_email = dict(), + service_account_permissions = dict(type='list'), pem_file = dict(), project_id = dict(), ip_forward = dict(type='bool', default=False), From f714cc5f7ecb7d8f8bf994276292db6e72caa0a2 Mon Sep 17 00:00:00 2001 From: Vasyl Kaigorodov Date: Fri, 15 May 2015 15:34:36 +0200 Subject: [PATCH 057/464] GCE module: document Service Account permissions parameter usage --- cloud/google/gce.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/cloud/google/gce.py b/cloud/google/gce.py index 20ceb257b3a..261f6d32297 100644 --- a/cloud/google/gce.py +++ b/cloud/google/gce.py @@ -58,6 +58,13 @@ options: required: false default: null aliases: [] + service_account_permissions: + version_added: 1.5.1 + description: + - service account permissions (see U(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create), --scopes section for detailed information) + required: false + default: null + aliases: [] pem_file: version_added: 1.5.1 description: From fa9727eb99fdd0c38ed7f3ba72cdf31c69e82a61 Mon Sep 17 00:00:00 2001 From: Vasyl Kaigorodov Date: Fri, 15 May 2015 16:00:24 +0200 Subject: [PATCH 058/464] GCE module: added Service Account permissions sanity checks --- cloud/google/gce.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/cloud/google/gce.py b/cloud/google/gce.py index 261f6d32297..b288d9dfb43 100644 --- a/cloud/google/gce.py +++ b/cloud/google/gce.py @@ -340,7 
+340,13 @@ def create_instances(module, gce, instance_names): metadata = {'items': items} ex_sa_perms = [] + bad_perms = [] if service_account_permissions: + for perm in service_account_permissions: + if not perm in gce.SA_SCOPES_MAP.keys(): + bad_perms.append(perm) + if len(bad_perms) > 0: + module.fail_json(msg='bad permissions: %s' % str(bad_perms)) if service_account_email: ex_sa_perms.append({'email': service_account_email}) else: From db292e08b9ec1cfe36f166ed032e1d0115148520 Mon Sep 17 00:00:00 2001 From: Tristan Fisher Date: Fri, 15 May 2015 17:09:54 -0400 Subject: [PATCH 059/464] standardizes bools in argument_spec --- files/file.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/files/file.py b/files/file.py index 8da87b0707e..3b4b5a466e6 100644 --- a/files/file.py +++ b/files/file.py @@ -34,7 +34,7 @@ module: file version_added: "historical" short_description: Sets attributes of files extends_documentation_fragment: files -description: +description: - Sets attributes of files, symlinks, and directories, or removes files/symlinks/directories. Many other modules support the same options as the M(file) module - including M(copy), M(template), and M(assemble). @@ -48,7 +48,7 @@ options: - 'path to the file being managed. Aliases: I(dest), I(name)' required: true default: [] - aliases: ['dest', 'name'] + aliases: ['dest', 'name'] state: description: - If C(directory), all immediate subdirectories will be created if they @@ -82,7 +82,7 @@ options: default: "no" choices: [ "yes", "no" ] description: - - 'force the creation of the symlinks in two cases: the source file does + - 'force the creation of the symlinks in two cases: the source file does not exist (but will appear later); the destination exists and is a file (so, we need to unlink the "path" file and create symlink to the "src" file in place of it).' 
''' @@ -150,10 +150,10 @@ def main(): state = dict(choices=['file','directory','link','hard','touch','absent'], default=None), path = dict(aliases=['dest', 'name'], required=True), original_basename = dict(required=False), # Internal use only, for recursive ops - recurse = dict(default='no', type='bool'), - force = dict(required=False,default=False,type='bool'), + recurse = dict(default=False, type='bool'), + force = dict(required=False, default=False, type='bool'), diff_peek = dict(default=None), - validate = dict(required=False, default=None), + validate = dict(required=False, default=None, type='bool'), src = dict(required=False, default=None), ), add_file_common_args=True, From adf34a6ccde75f32403d3d28a24f28ed9281c52a Mon Sep 17 00:00:00 2001 From: Semyon Deviatkin Date: Mon, 18 May 2015 08:10:22 +0000 Subject: [PATCH 060/464] Fix issue #1230 When virtual package providing only one package, look up status of target package --- packaging/os/apt.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/packaging/os/apt.py b/packaging/os/apt.py index a160c13c311..ac82f41a7ff 100644 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -206,8 +206,16 @@ def package_status(m, pkgname, version, cache, state): except KeyError: if state == 'install': try: - if cache.get_providing_packages(pkgname): - return False, True, False + provided_packages = cache.get_providing_packages(pkgname) + if provided_packages: + is_installed = False + # when virtual package providing only one package, look up status of target package + if cache.is_virtual_package(pkgname) and len(provided_packages) == 1: + package = provided_packages[0] + installed, upgradable, has_files = package_status(m, package.name, version, cache, state='install') + if installed: + is_installed = True + return is_installed, True, False m.fail_json(msg="No package matching '%s' is available" % pkgname) except AttributeError: # python-apt version too old to detect virtual packages From 
78d84dc400a0f42f0b6a03ab981a54f7e70fdf6d Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 18 May 2015 08:40:12 -0400 Subject: [PATCH 061/464] value should be string as that is what it compares against --- system/sysctl.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/sysctl.py b/system/sysctl.py index 4517c724ca9..c5a68685053 100644 --- a/system/sysctl.py +++ b/system/sysctl.py @@ -322,7 +322,7 @@ def main(): module = AnsibleModule( argument_spec = dict( name = dict(aliases=['key'], required=True), - value = dict(aliases=['val'], required=False), + value = dict(aliases=['val'], required=False, type='str'), state = dict(default='present', choices=['present', 'absent']), reload = dict(default=True, type='bool'), sysctl_set = dict(default=False, type='bool'), From 81b476cd02ef53a1e665a71bcd098463e1a4ead3 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 18 May 2015 11:59:20 -0700 Subject: [PATCH 062/464] Update module "imports" to new style Fixes #1351 --- cloud/amazon/_ec2_ami_search.py | 2 +- cloud/vmware/vsphere_guest.py | 2 +- files/replace.py | 5 +++-- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/cloud/amazon/_ec2_ami_search.py b/cloud/amazon/_ec2_ami_search.py index 6296020b777..65953af2b5d 100644 --- a/cloud/amazon/_ec2_ami_search.py +++ b/cloud/amazon/_ec2_ami_search.py @@ -195,7 +195,7 @@ def main(): # this is magic, see lib/ansible/module_common.py -#<> +from ansible.module_utils.basic import * if __name__ == '__main__': main() diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index 62057759172..a33fd52ea70 100644 --- a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -1321,6 +1321,6 @@ def main(): # this is magic, see lib/ansible/module_common.py -#<> +from ansible.module_utils.basic import * if __name__ == '__main__': main() diff --git a/files/replace.py b/files/replace.py index 588af02391e..a21d84cd8df 100644 --- a/files/replace.py +++ b/files/replace.py 
@@ -160,6 +160,7 @@ def main(): module.exit_json(changed=changed, msg=msg) # this is magic, see lib/ansible/module_common.py -#<> +from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() From 7dd9f57e161b78981eb797a4c77fd6e7042ad7fd Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 18 May 2015 12:45:47 -0700 Subject: [PATCH 063/464] Fix splitting of role_attrs --- database/postgresql/postgresql_user.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/database/postgresql/postgresql_user.py b/database/postgresql/postgresql_user.py index 7dda85f343c..78aab6b88da 100644 --- a/database/postgresql/postgresql_user.py +++ b/database/postgresql/postgresql_user.py @@ -263,7 +263,7 @@ def user_alter(cursor, module, user, password, role_attr_flags, encrypted, expir role_attr_flags_changing = False if role_attr_flags: role_attr_flags_dict = {} - for r in role_attr_flags.split(','): + for r in role_attr_flags.split(' '): if r.startswith('NO'): role_attr_flags_dict[r.replace('NO', '', 1)] = False else: From 1f7f7c7dbc23ac517461439105a4c9f5b370e024 Mon Sep 17 00:00:00 2001 From: Tom Paine Date: Mon, 18 May 2015 21:30:18 +0100 Subject: [PATCH 064/464] Update ec2.py Update instance_type description to feature up-to-date list of instance types. Matches format of 'ebs_optimized' param which also includes a link. Update region description to feature up-to-date list of ec2 regions. Matches format of 'ebs_optimized' param which also includes a link. Updated choices for monitoring param to be explicit bool matching default 'yes'. Updated choices for source_dest_check to be explicit bool matching default 'true'. Updated tenancy choices to remove redundant phrasing in comments. 
--- cloud/amazon/ec2.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index 95dab865026..6afbe22885b 100755 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -44,7 +44,7 @@ options: region: version_added: "1.2" description: - - The AWS region to use. Must be specified if ec2_url is not used. If not specified then the value of the EC2_REGION environment variable, if any, is used. + - The AWS region to use. Must be specified if ec2_url is not used. If not specified then the value of the EC2_REGION environment variable, if any, is used. See U(http://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region) required: false default: null aliases: [ 'aws_region', 'ec2_region' ] @@ -57,16 +57,17 @@ options: aliases: [ 'aws_zone', 'ec2_zone' ] instance_type: description: - - instance type to use for the instance + - instance type to use for the instance, see U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) required: true default: null aliases: [] tenancy: version_added: "1.9" description: - - An instance with a tenancy of "dedicated" runs on single-tenant hardware and can only be launched into a VPC. Valid values are "default" or "dedicated". Note that to use dedicated tenancy you MUST specify a vpc_subnet_id as well. Dedicated tenancy is not available for EC2 "micro" instances. + - An instance with a tenancy of "dedicated" runs on single-tenant hardware and can only be launched into a VPC. Note that to use dedicated tenancy you MUST specify a vpc_subnet_id as well. Dedicated tenancy is not available for EC2 "micro" instances. 
required: false default: default + choices: [ "default", "dedicated" ] aliases: [] spot_price: version_added: "1.5" @@ -123,6 +124,7 @@ options: - enable detailed monitoring (CloudWatch) for instance required: false default: null + choices: [ "yes", "no" ] aliases: [] user_data: version_added: "0.9" @@ -187,6 +189,7 @@ options: - Enable or Disable the Source/Destination checks (for NAT instances and Virtual Routers) required: false default: true + choices: [ "true", "false" ] state: version_added: "1.3" description: From 6196dc8c31bb4828a66d53cc2233319b1d9d69be Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 18 May 2015 16:33:46 -0400 Subject: [PATCH 065/464] minor doc fix and made sure check_implicit_admin is true boolean --- database/mysql/mysql_user.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/database/mysql/mysql_user.py b/database/mysql/mysql_user.py index 54b63eed4d7..bd289ff30de 100644 --- a/database/mysql/mysql_user.py +++ b/database/mysql/mysql_user.py @@ -90,7 +90,8 @@ options: description: - Check if mysql allows login as root/nopassword before trying supplied credentials. 
required: false - default: false + choices: [ "yes", "no" ] + default: "no" version_added: "1.3" update_password: required: false @@ -385,8 +386,8 @@ def main(): host=dict(default="localhost"), state=dict(default="present", choices=["absent", "present"]), priv=dict(default=None), - append_privs=dict(type="bool", default="no"), - check_implicit_admin=dict(default=False), + append_privs=dict(default=False, type='bool'), + check_implicit_admin=dict(default=False, type='bool'), update_password=dict(default="always", choices=["always", "on_create"]), config_file=dict(default="~/.my.cnf"), ) From c97af02e40ba4acfb49922b8a9c1018af181f9ba Mon Sep 17 00:00:00 2001 From: Tom Paine Date: Mon, 18 May 2015 21:34:57 +0100 Subject: [PATCH 066/464] Update ec2.py updated default and choices to match standard --- cloud/amazon/ec2.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index 6afbe22885b..ca0e3d34bc6 100755 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -188,8 +188,8 @@ options: description: - Enable or Disable the Source/Destination checks (for NAT instances and Virtual Routers) required: false - default: true - choices: [ "true", "false" ] + default: yes + choices: [ "yes", "no" ] state: version_added: "1.3" description: From 90b34e2b67f6c32904a20d5edec9ffdae898cc88 Mon Sep 17 00:00:00 2001 From: Tristan Fisher Date: Mon, 18 May 2015 18:44:31 -0400 Subject: [PATCH 067/464] removes bool type from validate. 
--- files/file.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/file.py b/files/file.py index 3b4b5a466e6..fb3241b4241 100644 --- a/files/file.py +++ b/files/file.py @@ -153,7 +153,7 @@ def main(): recurse = dict(default=False, type='bool'), force = dict(required=False, default=False, type='bool'), diff_peek = dict(default=None), - validate = dict(required=False, default=None, type='bool'), + validate = dict(required=False, default=None), src = dict(required=False, default=None), ), add_file_common_args=True, From f7064c672cb07b511112d26c0f52ec741e68aa34 Mon Sep 17 00:00:00 2001 From: Jonathan Mainguy Date: Mon, 18 May 2015 22:55:51 -0400 Subject: [PATCH 068/464] added nice error for systemd hosts where name > 64 chars --- system/hostname.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/system/hostname.py b/system/hostname.py index 307a8b687be..1fe16e506f4 100644 --- a/system/hostname.py +++ b/system/hostname.py @@ -248,6 +248,8 @@ class SystemdStrategy(GenericStrategy): return out.strip() def set_current_hostname(self, name): + if len(name) > 64: + self.module.fail_json(msg="name cannot be longer than 64 characters on systemd servers, try a shorter name") cmd = ['hostnamectl', '--transient', 'set-hostname', name] rc, out, err = self.module.run_command(cmd) if rc != 0: @@ -263,6 +265,8 @@ class SystemdStrategy(GenericStrategy): return out.strip() def set_permanent_hostname(self, name): + if len(name) > 64: + self.module.fail_json(msg="name cannot be longer than 64 characters on systemd servers, try a shorter name") cmd = ['hostnamectl', '--pretty', 'set-hostname', name] rc, out, err = self.module.run_command(cmd) if rc != 0: From b9dba50372b3fe41a1f231fad44d9cddbcd99795 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 18 May 2015 16:33:46 -0400 Subject: [PATCH 069/464] minor doc fix and made sure check_implicit_admin is true boolean --- database/mysql/mysql_user.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) 
diff --git a/database/mysql/mysql_user.py b/database/mysql/mysql_user.py index 54b63eed4d7..bd289ff30de 100644 --- a/database/mysql/mysql_user.py +++ b/database/mysql/mysql_user.py @@ -90,7 +90,8 @@ options: description: - Check if mysql allows login as root/nopassword before trying supplied credentials. required: false - default: false + choices: [ "yes", "no" ] + default: "no" version_added: "1.3" update_password: required: false @@ -385,8 +386,8 @@ def main(): host=dict(default="localhost"), state=dict(default="present", choices=["absent", "present"]), priv=dict(default=None), - append_privs=dict(type="bool", default="no"), - check_implicit_admin=dict(default=False), + append_privs=dict(default=False, type='bool'), + check_implicit_admin=dict(default=False, type='bool'), update_password=dict(default="always", choices=["always", "on_create"]), config_file=dict(default="~/.my.cnf"), ) From 37532628c053a3c975a0104a3d5828dd42bb35f1 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 19 May 2015 10:44:17 -0400 Subject: [PATCH 070/464] minor docfix --- files/file.py | 1 - 1 file changed, 1 deletion(-) diff --git a/files/file.py b/files/file.py index fb3241b4241..042129565d7 100644 --- a/files/file.py +++ b/files/file.py @@ -66,7 +66,6 @@ options: src: required: false default: null - choices: [] description: - path of the file to link to (applies only to C(state=link)). Will accept absolute, relative and nonexisting paths. Relative paths are not expanded. 
From 5011593ee398895d25c8078f3bf073442f4fa33d Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 19 May 2015 10:52:33 -0400 Subject: [PATCH 071/464] fixed typo on novaclient var --- cloud/openstack/nova_keypair.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/openstack/nova_keypair.py b/cloud/openstack/nova_keypair.py index 686484cf37f..1182b0daa4a 100644 --- a/cloud/openstack/nova_keypair.py +++ b/cloud/openstack/nova_keypair.py @@ -97,7 +97,7 @@ def main(): state = dict(default='present', choices=['absent', 'present']) )) module = AnsibleModule(argument_spec=argument_spec) - if not HAVE_NOVACLIENT: + if not HAS_NOVACLIENT: module.fail_json(msg='python-novaclient is required for this module to work') nova = nova_client.Client(module.params['login_username'], From f38bf24c8a597a4e12e955269621b7d32532e252 Mon Sep 17 00:00:00 2001 From: Greg Taylor Date: Tue, 19 May 2015 11:45:00 -0700 Subject: [PATCH 072/464] Adding directory creation example to file module. --- files/file.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/files/file.py b/files/file.py index 042129565d7..329fe1e0263 100644 --- a/files/file.py +++ b/files/file.py @@ -101,6 +101,9 @@ EXAMPLES = ''' # touch the same file, but add/remove some permissions - file: path=/etc/foo.conf state=touch mode="u+rw,g-wx,o-rwx" +# create a directory if it doesn't exist +- file: path=/etc/some_directory state=directory mode=0755 + ''' From c9b17136e404ec8090a1d140f6875b6687720639 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 19 May 2015 12:41:48 -0700 Subject: [PATCH 073/464] Fix a problem introduced with #1101 and optimize privilege handling * If a db user belonged to a role which had a privilege, the user would not have the privilege added as the role gave the appearance that the user already had it. Fixed to always check the privileges specific to the user. 
* Make fewer db queries to determine if privileges need to be changed and change them (was four for each privilege. Now two for each object that has a set of privileges changed). --- database/postgresql/postgresql_user.py | 136 +++++++++++++++---------- 1 file changed, 81 insertions(+), 55 deletions(-) diff --git a/database/postgresql/postgresql_user.py b/database/postgresql/postgresql_user.py index 78aab6b88da..98f234fc1db 100644 --- a/database/postgresql/postgresql_user.py +++ b/database/postgresql/postgresql_user.py @@ -324,12 +324,21 @@ def user_delete(cursor, user): cursor.execute("RELEASE SAVEPOINT ansible_pgsql_user_delete") return True -def has_table_privilege(cursor, user, table, priv): - if priv == 'ALL': - priv = ','.join([ p for p in VALID_PRIVS['table'] if p != 'ALL' ]) - query = 'SELECT has_table_privilege(%s, %s, %s)' - cursor.execute(query, (user, table, priv)) - return cursor.fetchone()[0] +def has_table_privileges(cursor, user, table, privs): + """ + Return the difference between the privileges that a user already has and + the privileges that they desire to have. + + :returns: tuple of: + * privileges that they have and were requested + * privileges they currently hold but were not requested + * privileges requested that they do not hold + """ + cur_privs = get_table_privileges(cursor, user, table) + have_currently = cur_privs.intersection(privs) + other_current = cur_privs.difference(privs) + desired = privs.difference(cur_privs) + return (have_currently, other_current, desired) def get_table_privileges(cursor, user, table): if '.' 
in table: @@ -339,26 +348,21 @@ def get_table_privileges(cursor, user, table): query = '''SELECT privilege_type FROM information_schema.role_table_grants WHERE grantee=%s AND table_name=%s AND table_schema=%s''' cursor.execute(query, (user, table, schema)) - return set([x[0] for x in cursor.fetchall()]) + return frozenset([x[0] for x in cursor.fetchall()]) -def grant_table_privilege(cursor, user, table, priv): +def grant_table_privileges(cursor, user, table, privs): # Note: priv escaped by parse_privs - prev_priv = get_table_privileges(cursor, user, table) + privs = ', '.join(privs) query = 'GRANT %s ON TABLE %s TO %s' % ( - priv, pg_quote_identifier(table, 'table'), pg_quote_identifier(user, 'role') ) + privs, pg_quote_identifier(table, 'table'), pg_quote_identifier(user, 'role') ) cursor.execute(query) - curr_priv = get_table_privileges(cursor, user, table) - return len(curr_priv) > len(prev_priv) -def revoke_table_privilege(cursor, user, table, priv): +def revoke_table_privileges(cursor, user, table, privs): # Note: priv escaped by parse_privs - prev_priv = get_table_privileges(cursor, user, table) + privs = ', '.join(privs) query = 'REVOKE %s ON TABLE %s FROM %s' % ( - priv, pg_quote_identifier(table, 'table'), pg_quote_identifier(user, 'role') ) + privs, pg_quote_identifier(table, 'table'), pg_quote_identifier(user, 'role') ) cursor.execute(query) - curr_priv = get_table_privileges(cursor, user, table) - return len(curr_priv) < len(prev_priv) - def get_database_privileges(cursor, user, db): priv_map = { @@ -370,80 +374,89 @@ def get_database_privileges(cursor, user, db): cursor.execute(query, (db,)) datacl = cursor.fetchone()[0] if datacl is None: - return [] + return set() r = re.search('%s=(C?T?c?)/[a-z]+\,?' 
% user, datacl) if r is None: - return [] - o = [] + return set() + o = set() for v in r.group(1): - o.append(priv_map[v]) - return o + o.add(priv_map[v]) + return normalize_privileges(o, 'database') -def has_database_privilege(cursor, user, db, priv): - if priv == 'ALL': - priv = ','.join([ p for p in VALID_PRIVS['database'] if p != 'ALL' ]) - query = 'SELECT has_database_privilege(%s, %s, %s)' - cursor.execute(query, (user, db, priv)) - return cursor.fetchone()[0] +def has_database_privileges(cursor, user, db, privs): + """ + Return the difference between the privileges that a user already has and + the privileges that they desire to have. + + :returns: tuple of: + * privileges that they have and were requested + * privileges they currently hold but were not requested + * privileges requested that they do not hold + """ + cur_privs = get_database_privileges(cursor, user, db) + have_currently = cur_privs.intersection(privs) + other_current = cur_privs.difference(privs) + desired = privs.difference(cur_privs) + return (have_currently, other_current, desired) -def grant_database_privilege(cursor, user, db, priv): +def grant_database_privileges(cursor, user, db, privs): # Note: priv escaped by parse_privs - prev_priv = get_database_privileges(cursor, user, db) + privs =', '.join(privs) if user == "PUBLIC": query = 'GRANT %s ON DATABASE %s TO PUBLIC' % ( - priv, pg_quote_identifier(db, 'database')) + privs, pg_quote_identifier(db, 'database')) else: query = 'GRANT %s ON DATABASE %s TO %s' % ( - priv, pg_quote_identifier(db, 'database'), + privs, pg_quote_identifier(db, 'database'), pg_quote_identifier(user, 'role')) cursor.execute(query) - curr_priv = get_database_privileges(cursor, user, db) - return len(curr_priv) > len(prev_priv) -def revoke_database_privilege(cursor, user, db, priv): +def revoke_database_privileges(cursor, user, db, privs): # Note: priv escaped by parse_privs - prev_priv = get_database_privileges(cursor, user, db) + privs = ', '.join(privs) if 
user == "PUBLIC": query = 'REVOKE %s ON DATABASE %s FROM PUBLIC' % ( - priv, pg_quote_identifier(db, 'database')) + privs, pg_quote_identifier(db, 'database')) else: query = 'REVOKE %s ON DATABASE %s FROM %s' % ( - priv, pg_quote_identifier(db, 'database'), + privs, pg_quote_identifier(db, 'database'), pg_quote_identifier(user, 'role')) cursor.execute(query) - curr_priv = get_database_privileges(cursor, user, db) - return len(curr_priv) < len(prev_priv) def revoke_privileges(cursor, user, privs): if privs is None: return False + revoke_funcs = dict(table=revoke_table_privileges, database=revoke_database_privileges) + check_funcs = dict(table=has_table_privileges, database=has_database_privileges) + changed = False - revoke_funcs = dict(table=revoke_table_privilege, database=revoke_database_privilege) - check_funcs = dict(table=has_table_privilege, database=has_database_privilege) for type_ in privs: for name, privileges in privs[type_].iteritems(): - for privilege in privileges: - if check_funcs[type_](cursor, user, name, privilege): - changed = revoke_funcs[type_](cursor, user, name, privilege)\ - or changed - + # Check that any of the privileges requested to be removed are + # currently granted to the user + differences = check_funcs[type_](cursor, user, name, privileges) + if differences[0]: + revoke_funcs[type_](cursor, user, name, privileges) + changed = True return changed def grant_privileges(cursor, user, privs): if privs is None: return False - grant_funcs = dict(table=grant_table_privilege, database=grant_database_privilege) - check_funcs = dict(table=has_table_privilege, database=has_database_privilege) + + grant_funcs = dict(table=grant_table_privileges, database=grant_database_privileges) + check_funcs = dict(table=has_table_privileges, database=has_database_privileges) changed = False for type_ in privs: for name, privileges in privs[type_].iteritems(): - for privilege in privileges: - if not check_funcs[type_](cursor, user, name, privilege): - 
changed = grant_funcs[type_](cursor, user, name, privilege)\ - or changed - + # Check that any of the privileges requested for the user are + # currently missing + differences = check_funcs[type_](cursor, user, name, privileges) + if differences[2]: + grant_funcs[type_](cursor, user, name, privileges) + changed = True return changed def parse_role_attrs(role_attr_flags): @@ -472,6 +485,17 @@ def parse_role_attrs(role_attr_flags): o_flags = ' '.join(flag_set) return o_flags +def normalize_privileges(privs, type_): + new_privs = set(privs) + if 'ALL' in privs: + new_privs.update(VALID_PRIVS[type_]) + new_privs.remove('ALL') + if 'TEMP' in privs: + new_privs.add('TEMPORARY') + new_privs.remove('TEMP') + + return new_privs + def parse_privs(privs, db): """ Parse privilege string to determine permissions for database db. @@ -504,6 +528,8 @@ def parse_privs(privs, db): if not priv_set.issubset(VALID_PRIVS[type_]): raise InvalidPrivsError('Invalid privs specified for %s: %s' % (type_, ' '.join(priv_set.difference(VALID_PRIVS[type_])))) + + priv_set = normalize_privileges(priv_set, type_) o_privs[type_][name] = priv_set return o_privs From 4106047e77eb422e67311a2f78e6ffb81f8535d6 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 19 May 2015 14:14:40 -0700 Subject: [PATCH 074/464] Fix documentation of the variable that backup file name is returned in --- system/cron.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/cron.py b/system/cron.py index f91587caf66..9728e4de401 100644 --- a/system/cron.py +++ b/system/cron.py @@ -71,7 +71,7 @@ options: backup: description: - If set, create a backup of the crontab before it is modified. - The location of the backup is returned in the C(backup) variable by this module. + The location of the backup is returned in the C(backup_file) variable by this module. 
required: false default: false minute: From 8d2fdf2aff1106fab5a8a9d17719383c5714efe8 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 19 May 2015 14:23:23 -0700 Subject: [PATCH 075/464] Update the cron docs to specify that it takes a boolean value --- system/cron.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/system/cron.py b/system/cron.py index 9728e4de401..cfb254acee4 100644 --- a/system/cron.py +++ b/system/cron.py @@ -73,7 +73,8 @@ options: - If set, create a backup of the crontab before it is modified. The location of the backup is returned in the C(backup_file) variable by this module. required: false - default: false + choices: [ "yes", "no" ] + default: no minute: description: - Minute when the job should run ( 0-59, *, */2, etc ) From ed07502587ea4826d11966bdb9c38c78aa81a5ac Mon Sep 17 00:00:00 2001 From: Tom Paine Date: Wed, 20 May 2015 16:01:29 +0100 Subject: [PATCH 076/464] Update vsphere_guest.py Add example distributed virtual switch example so vm_nic options are clearer. 
--- cloud/vmware/vsphere_guest.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index a33fd52ea70..a09a59c711d 100644 --- a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -153,6 +153,10 @@ EXAMPLES = ''' type: vmxnet3 network: VM Network network_type: standard + nic2: + type: vmxnet3 + network: dvSwitch Network + network_type: dvs vm_hardware: memory_mb: 2048 num_cpus: 2 From 9468a65bed2ec4acf9bf6c8bd26abe96a0804153 Mon Sep 17 00:00:00 2001 From: Schlueter Date: Wed, 20 May 2015 13:27:04 -0400 Subject: [PATCH 077/464] Add state to easy_install --- packaging/language/easy_install.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/packaging/language/easy_install.py b/packaging/language/easy_install.py index 889a81f025a..b95cb1bb08e 100644 --- a/packaging/language/easy_install.py +++ b/packaging/language/easy_install.py @@ -89,8 +89,9 @@ EXAMPLES = ''' - easy_install: name=bottle virtualenv=/webapps/myapp/venv ''' -def _is_package_installed(module, name, easy_install): - cmd = '%s --dry-run %s' % (easy_install, name) +def _is_package_installed(module, name, easy_install, executable_arguments): + executable_arguments.append('--dry-run') + cmd = '%s %s %s' % (easy_install, ' '.join(executable_arguments), name) rc, status_stdout, status_stderr = module.run_command(cmd) return not ('Reading' in status_stdout or 'Downloading' in status_stdout) @@ -124,6 +125,10 @@ def _get_easy_install(module, env=None, executable=None): def main(): arg_spec = dict( name=dict(required=True), + state=dict(required=False, + default='present', + choices=['present','latest'], + type='str'), virtualenv=dict(default=None, required=False), virtualenv_site_packages=dict(default='no', type='bool'), virtualenv_command=dict(default='virtualenv', required=False), @@ -137,6 +142,8 @@ def main(): executable = module.params['executable'] site_packages = 
module.params['virtualenv_site_packages'] virtualenv_command = module.params['virtualenv_command'] + executable_arguments = [] + module.params['state'] is 'latest' and executable_arguments.append('--upgrade') rc = 0 err = '' @@ -167,7 +174,7 @@ def main(): if not installed: if module.check_mode: module.exit_json(changed=True) - cmd = '%s %s' % (easy_install, name) + cmd = '%s %s %s' % (easy_install, ' '.join(executable_arguments), name) rc_easy_inst, out_easy_inst, err_easy_inst = module.run_command(cmd) rc += rc_easy_inst From 6606e4f68dc9d7b6e4fb25e1bd05b02cac84a46c Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Tue, 31 Mar 2015 17:05:43 -0400 Subject: [PATCH 078/464] Add OpenStack Network module Also, deprecate the old quantum network module --- ...quantum_network.py => _quantum_network.py} | 1 + cloud/openstack/os_network.py | 107 ++++++++++++++++++ 2 files changed, 108 insertions(+) rename cloud/openstack/{quantum_network.py => _quantum_network.py} (99%) create mode 100644 cloud/openstack/os_network.py diff --git a/cloud/openstack/quantum_network.py b/cloud/openstack/_quantum_network.py similarity index 99% rename from cloud/openstack/quantum_network.py rename to cloud/openstack/_quantum_network.py index ff8b2683f37..93b10880823 100644 --- a/cloud/openstack/quantum_network.py +++ b/cloud/openstack/_quantum_network.py @@ -30,6 +30,7 @@ DOCUMENTATION = ''' --- module: quantum_network version_added: "1.4" +deprecated: Deprecated in 2.0. Use os_network instead short_description: Creates/Removes networks from OpenStack description: - Add or Remove network from OpenStack. diff --git a/cloud/openstack/os_network.py b/cloud/openstack/os_network.py new file mode 100644 index 00000000000..b77a7e331a4 --- /dev/null +++ b/cloud/openstack/os_network.py @@ -0,0 +1,107 @@ +#!/usr/bin/python + +# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. 
+# Copyright (c) 2013, Benno Joy +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . + +try: + import shade + HAS_SHADE = True +except ImportError: + HAS_SHADE = False + +DOCUMENTATION = ''' +--- +module: os_network +short_description: Creates/Removes networks from OpenStack +extends_documentation_fragment: openstack +version_added: "2.0" +description: + - Add or Remove network from OpenStack. +options: + name: + description: + - Name to be assigned to the network. + required: true + shared: + description: + - Whether this network is shared or not. + required: false + default: false + admin_state_up: + description: + - Whether the state should be marked as up or down. + required: false + default: true + state: + description: + - Indicate desired state of the resource. 
+ choices: ['present', 'absent'] + required: false + default: present +requirements: ["shade"] +''' + +EXAMPLES = ''' +- os_network: + name=t1network + state=present +''' + + +def main(): + argument_spec = openstack_full_argument_spec( + name=dict(required=True), + shared=dict(default=False, type='bool'), + admin_state_up=dict(default=True, type='bool'), + ) + + module_kwargs = openstack_module_kwargs() + module = AnsibleModule(argument_spec, **module_kwargs) + + if not HAS_SHADE: + module.fail_json(msg='shade is required for this module') + + state = module.params['state'] + name = module.params['name'] + shared = module.params['shared'] + admin_state_up = module.params['admin_state_up'] + + try: + cloud = shade.openstack_cloud(**module.params) + net = cloud.get_network(name) + + if state == 'present': + if not net: + net = cloud.create_network(name, shared, admin_state_up) + module.exit_json(changed=True, result="Created", id=net['id']) + else: + module.exit_json(changed=False, result="Success", id=net['id']) + + elif state == 'absent': + if not net: + module.exit_json(changed=False, result="Success") + else: + cloud.delete_network(name) + module.exit_json(changed=True, result="Deleted") + + except shade.OpenStackCloudException as e: + module.fail_json(msg=e.message) + + +# this is magic, see lib/ansible/module_common.py +from ansible.module_utils.basic import * +from ansible.module_utils.openstack import * +main() From 364536a76ef4a3626bbcef3b45effa1bdcb6ce5a Mon Sep 17 00:00:00 2001 From: Zoltan Kozma Date: Wed, 20 May 2015 19:46:39 +0100 Subject: [PATCH 079/464] - Fixed annoying bug that disablerepo was essentially broken if a package needed updating by state latest. - Replaced some unsafe practice with default parameters. However looking at the code this does not seem to matter much as the calling functions always seem to supply these parameters anyway. 
--- packaging/os/yum.py | 31 ++++++++++++++++++++++--------- 1 file changed, 22 insertions(+), 9 deletions(-) diff --git a/packaging/os/yum.py b/packaging/os/yum.py index a8b996c84de..ac183641a30 100644 --- a/packaging/os/yum.py +++ b/packaging/os/yum.py @@ -26,6 +26,7 @@ import traceback import os import yum import rpm +import syslog try: from yum.misc import find_unfinished_transactions, find_ts_remaining @@ -153,8 +154,6 @@ if not os.path.exists(repoquery): yumbin='/usr/bin/yum' -import syslog - def log(msg): syslog.openlog('ansible-yum', 0, syslog.LOG_USER) syslog.syslog(syslog.LOG_NOTICE, msg) @@ -186,8 +185,11 @@ def po_to_nevra(po): else: return '%s-%s-%s.%s' % (po.name, po.version, po.release, po.arch) -def is_installed(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=[], dis_repos=[], is_pkg=False): - +def is_installed(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=None, dis_repos=None, is_pkg=False): + if en_repos is None: + en_repos = [] + if dis_repos is None: + dis_repos = [] if not repoq: pkgs = [] @@ -225,7 +227,11 @@ def is_installed(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=[], dis_ return [] -def is_available(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=[], dis_repos=[]): +def is_available(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=None, dis_repos=None): + if en_repos is None: + en_repos = [] + if dis_repos is None: + dis_repos = [] if not repoq: @@ -262,10 +268,13 @@ def is_available(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=[], dis_ else: module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err)) - return [] -def is_update(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=[], dis_repos=[]): +def is_update(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=None, dis_repos=None): + if en_repos is None: + en_repos = [] + if dis_repos is None: + dis_repos = [] if not repoq: @@ -312,7 +321,11 @@ def is_update(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=[], 
dis_rep return [] -def what_provides(module, repoq, req_spec, conf_file, qf=def_qf, en_repos=[], dis_repos=[]): +def what_provides(module, repoq, req_spec, conf_file, qf=def_qf, en_repos=None, dis_repos=None): + if en_repos is None: + en_repos = [] + if dis_repos is None: + dis_repos = [] if not repoq: @@ -683,7 +696,7 @@ def latest(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): nothing_to_do = False break - if basecmd == 'update' and is_update(module, repoq, this, conf_file, en_repos=en_repos, dis_repos=en_repos): + if basecmd == 'update' and is_update(module, repoq, this, conf_file, en_repos=en_repos, dis_repos=dis_repos): nothing_to_do = False break From 3a294c3379f4819c9b82e154ec32d4617ef7de55 Mon Sep 17 00:00:00 2001 From: Jonathan Mainguy Date: Thu, 21 May 2015 09:07:23 -0400 Subject: [PATCH 080/464] fixes example documenation --- cloud/amazon/ec2_ami_find.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_ami_find.py b/cloud/amazon/ec2_ami_find.py index 1c790849cff..2c83e0d3204 100644 --- a/cloud/amazon/ec2_ami_find.py +++ b/cloud/amazon/ec2_ami_find.py @@ -141,7 +141,7 @@ EXAMPLES = ''' # Search for the AMI tagged "project:website" - ec2_ami_find: owner: self - tags: + ami_tags: project: website no_result_action: fail register: ami_find From 5a22f052b4f33b64e8e2b1f68d36abe731c1572b Mon Sep 17 00:00:00 2001 From: Jonathan Mainguy Date: Thu, 21 May 2015 09:32:12 -0400 Subject: [PATCH 081/464] changes hostname to lowercase --- database/mysql/mysql_user.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/database/mysql/mysql_user.py b/database/mysql/mysql_user.py index bd289ff30de..ba5b6370f1b 100644 --- a/database/mysql/mysql_user.py +++ b/database/mysql/mysql_user.py @@ -396,7 +396,7 @@ def main(): login_password = module.params["login_password"] user = module.params["user"] password = module.params["password"] - host = module.params["host"] + host = module.params["host"].lower() state = 
module.params["state"] priv = module.params["priv"] check_implicit_admin = module.params['check_implicit_admin'] From 150b71f11af607a31b108f2171308149c99f2cbd Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 21 May 2015 07:01:08 -0400 Subject: [PATCH 082/464] removed executable bit --- cloud/amazon/ec2.py | 0 cloud/amazon/rds.py | 0 system/group.py | 0 system/user.py | 0 4 files changed, 0 insertions(+), 0 deletions(-) mode change 100755 => 100644 cloud/amazon/ec2.py mode change 100755 => 100644 cloud/amazon/rds.py mode change 100755 => 100644 system/group.py mode change 100755 => 100644 system/user.py diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py old mode 100755 new mode 100644 diff --git a/cloud/amazon/rds.py b/cloud/amazon/rds.py old mode 100755 new mode 100644 diff --git a/system/group.py b/system/group.py old mode 100755 new mode 100644 diff --git a/system/user.py b/system/user.py old mode 100755 new mode 100644 From 3183ab6da17d956dcd82726606b2af1d4ef05687 Mon Sep 17 00:00:00 2001 From: Tom Paine Date: Thu, 21 May 2015 12:52:52 +0100 Subject: [PATCH 083/464] Update vsphere_guest.py Add mounted ISO image to examples. 
--- cloud/vmware/vsphere_guest.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index a09a59c711d..256ec00abab 100644 --- a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -162,6 +162,9 @@ EXAMPLES = ''' num_cpus: 2 osid: centos64Guest scsi: paravirtual + vm_cdrom: + type: "iso" + iso_path: "DatastoreName/cd-image.iso" esxi: datacenter: MyDatacenter hostname: esx001.mydomain.local From 7de4b13124669da23688fb223e392e49e3f56bb5 Mon Sep 17 00:00:00 2001 From: Jamie Hannaford Date: Thu, 21 May 2015 15:52:03 +0200 Subject: [PATCH 084/464] Fix get_launch_config var name --- cloud/rackspace/rax_scaling_group.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/rackspace/rax_scaling_group.py b/cloud/rackspace/rax_scaling_group.py index e3c6ec8944f..b8737632d8c 100644 --- a/cloud/rackspace/rax_scaling_group.py +++ b/cloud/rackspace/rax_scaling_group.py @@ -262,7 +262,7 @@ def rax_asg(module, cooldown=300, disk_config=None, files={}, flavor=None, # Launch Configuration Updates lc = sg.get_launch_config() lc_args = {} - if server_name != lc.get('server_name'): + if server_name != lc.get('name'): lc_args['server_name'] = server_name if image != lc.get('image'): From 9b6a7416155e26e939c7681cb2d61de900792ff3 Mon Sep 17 00:00:00 2001 From: Schlueter Date: Thu, 21 May 2015 12:04:13 -0400 Subject: [PATCH 085/464] Correct pass by object reference issue --- packaging/language/easy_install.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/language/easy_install.py b/packaging/language/easy_install.py index b95cb1bb08e..6ae42b65967 100644 --- a/packaging/language/easy_install.py +++ b/packaging/language/easy_install.py @@ -90,7 +90,7 @@ EXAMPLES = ''' ''' def _is_package_installed(module, name, easy_install, executable_arguments): - executable_arguments.append('--dry-run') + executable_arguments = executable_arguments + ['--dry-run'] cmd = '%s %s 
%s' % (easy_install, ' '.join(executable_arguments), name) rc, status_stdout, status_stderr = module.run_command(cmd) return not ('Reading' in status_stdout or 'Downloading' in status_stdout) From 3bae8bda10854ebaea7daed53d07dfddbce30900 Mon Sep 17 00:00:00 2001 From: Schlueter Date: Thu, 21 May 2015 12:16:26 -0400 Subject: [PATCH 086/464] Use standard if statement to check state in easy_install resource --- packaging/language/easy_install.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/packaging/language/easy_install.py b/packaging/language/easy_install.py index 6ae42b65967..635f1b64f3f 100644 --- a/packaging/language/easy_install.py +++ b/packaging/language/easy_install.py @@ -143,7 +143,8 @@ def main(): site_packages = module.params['virtualenv_site_packages'] virtualenv_command = module.params['virtualenv_command'] executable_arguments = [] - module.params['state'] is 'latest' and executable_arguments.append('--upgrade') + if module.params['state'] == 'latest': + executable_arguments.append('--upgrade') rc = 0 err = '' From 02cd8489c17556128b344c22b59eb4799ac84ebe Mon Sep 17 00:00:00 2001 From: Schlueter Date: Thu, 21 May 2015 12:23:05 -0400 Subject: [PATCH 087/464] Add Documentation --- packaging/language/easy_install.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/packaging/language/easy_install.py b/packaging/language/easy_install.py index 635f1b64f3f..eaf16e6de92 100644 --- a/packaging/language/easy_install.py +++ b/packaging/language/easy_install.py @@ -70,6 +70,12 @@ options: version_added: "1.3" required: false default: null + state: + description: + - The desired state of the gem. C(latest) ensures that the latest version is installed. + required: false + choices: [present, latest] + default: present notes: - Please note that the M(easy_install) module can only install Python libraries. Thus this module is not able to remove libraries. 
It is @@ -83,7 +89,7 @@ author: Matt Wright EXAMPLES = ''' # Examples from Ansible Playbooks -- easy_install: name=pip +- easy_install: name=pip state=latest # Install Bottle into the specified virtualenv. - easy_install: name=bottle virtualenv=/webapps/myapp/venv From 17544062ec6fa4035538a11d030632cce3ad8d78 Mon Sep 17 00:00:00 2001 From: Schlueter Date: Thu, 21 May 2015 12:49:43 -0400 Subject: [PATCH 088/464] Correct reference to gem in easy_install resource --- packaging/language/easy_install.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/language/easy_install.py b/packaging/language/easy_install.py index eaf16e6de92..e1924d0c160 100644 --- a/packaging/language/easy_install.py +++ b/packaging/language/easy_install.py @@ -72,7 +72,7 @@ options: default: null state: description: - - The desired state of the gem. C(latest) ensures that the latest version is installed. + - The desired state of the library. C(latest) ensures that the latest version is installed. required: false choices: [present, latest] default: present From 1030cb48a793fe685f55c283933f82f4afb014e3 Mon Sep 17 00:00:00 2001 From: Schlueter Date: Thu, 21 May 2015 12:54:48 -0400 Subject: [PATCH 089/464] Add 'version_added' to state documentation for easy_install resource --- packaging/language/easy_install.py | 1 + 1 file changed, 1 insertion(+) diff --git a/packaging/language/easy_install.py b/packaging/language/easy_install.py index e1924d0c160..77efeae797f 100644 --- a/packaging/language/easy_install.py +++ b/packaging/language/easy_install.py @@ -71,6 +71,7 @@ options: required: false default: null state: + version_added: "2.0" description: - The desired state of the library. C(latest) ensures that the latest version is installed. 
required: false From f0dc27227c11d0fea849be00e41b5e5640a42af8 Mon Sep 17 00:00:00 2001 From: Vasyl Kaigorodov Date: Thu, 21 May 2015 23:24:20 +0200 Subject: [PATCH 090/464] Fix "_is_package_installed() takes exactly 4 arguments, 3 provided" error --- packaging/language/easy_install.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/language/easy_install.py b/packaging/language/easy_install.py index 77efeae797f..d566e003d81 100644 --- a/packaging/language/easy_install.py +++ b/packaging/language/easy_install.py @@ -177,7 +177,7 @@ def main(): cmd = None changed = False - installed = _is_package_installed(module, name, easy_install) + installed = _is_package_installed(module, name, easy_install, executable_arguments) if not installed: if module.check_mode: From a14aee5239e664b9e1583779822b7368a5ff0395 Mon Sep 17 00:00:00 2001 From: Jesse Keating Date: Thu, 21 May 2015 16:19:57 -0700 Subject: [PATCH 091/464] Add an openstack servers actions module This module supports a few of the server actions that are easy to initially impiment. Other actions require input and provide return values in the API calls that will be more difficult to impliment, and thus are not part of this initial commit. --- cloud/openstack/os_server_actions.py | 192 +++++++++++++++++++++++++++ 1 file changed, 192 insertions(+) create mode 100644 cloud/openstack/os_server_actions.py diff --git a/cloud/openstack/os_server_actions.py b/cloud/openstack/os_server_actions.py new file mode 100644 index 00000000000..2b739df4de1 --- /dev/null +++ b/cloud/openstack/os_server_actions.py @@ -0,0 +1,192 @@ +#!/usr/bin/python +# coding: utf-8 -*- + +# Copyright (c) 2015, Jesse Keating +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . + + +try: + import shade + from shade import meta + HAS_SHADE = True +except ImportError: + HAS_SHADE = False + + +DOCUMENTATION = ''' +--- +module: os_server_actions +short_description: Perform actions on Compute Instances from OpenStack +extends_documentation_fragment: openstack +version_added: "2.0" +description: + - Perform server actions on an existing compute instance from OpenStack. + This module does not return any data other than changed true/false. +options: + server: + description: + - Name or ID of the instance + required: true + wait: + description: + - If the module should wait for the instance action to be performed. + required: false + default: 'yes' + timeout: + description: + - The amount of time the module should wait for the instance to perform + the requested action. + required: false + default: 180 + action: + description: + - Perform the given action. The lock and unlock actions always return + changed as the servers API does not provide lock status. 
+ choices: [pause, unpause, lock, unlock, suspend, resume] + default: present +requirements: + - "python >= 2.6" + - "shade" +''' + +EXAMPLES = ''' +# Pauses a compute instance +- os_server_actions: + action: pause + auth: + auth_url: https://mycloud.openstack.blueboxgrid.com:5001/v2.0 + username: admin + password: admin + project_name: admin + server: vm1 + timeout: 200 +''' + +_action_map = {'pause': 'PAUSED', + 'unpause': 'ACTIVE', + 'lock': 'ACTIVE', # API doesn't show lock/unlock status + 'unlock': 'ACTIVE', + 'suspend': 'SUSPENDED', + 'resume': 'ACTIVE',} + +_admin_actions = ['pause', 'unpause', 'suspend', 'resume', 'lock', 'unlock'] + +def _wait(timeout, cloud, server, action): + """Wait for the server to reach the desired state for the given action.""" + + for count in shade._iterate_timeout( + timeout, + "Timeout waiting for server to complete %s" % action): + try: + server = cloud.get_server(server.id) + except Exception: + continue + + if server.status == _action_map[action]: + return + + if server.status == 'ERROR': + module.fail_json(msg="Server reached ERROR state while attempting to %s" % action) + +def _system_state_change(action, status): + """Check if system state would change.""" + if status == _action_map[action]: + return False + return True + +def main(): + argument_spec = openstack_full_argument_spec( + server=dict(required=True), + action=dict(required=True, choices=['pause', 'unpause', 'lock', 'unlock', 'suspend', + 'resume']), + ) + + module_kwargs = openstack_module_kwargs() + module = AnsibleModule(argument_spec, supports_check_mode=True, **module_kwargs) + + if not HAS_SHADE: + module.fail_json(msg='shade is required for this module') + + action = module.params['action'] + wait = module.params['wait'] + timeout = module.params['timeout'] + + try: + if action in _admin_actions: + cloud = shade.operator_cloud(**module.params) + else: + cloud = shade.openstack_cloud(**module.params) + server = cloud.get_server(module.params['server']) + if 
not server: + module.fail_json(msg='Could not find server %s' % server) + status = server.status + + if module.check_mode: + module.exit_json(changed=_system_state_change(action, status)) + + if action == 'pause': + if not _system_state_change(action, status): + module.exit_json(changed=False) + + cloud.nova_client.servers.pause(server=server.id) + if wait: + _wait(timeout, cloud, server, action) + module.exit_json(changed=True) + + elif action == 'unpause': + if not _system_state_change(action, status): + module.exit_json(changed=False) + + cloud.nova_client.servers.unpause(server=server.id) + if wait: + _wait(timeout, cloud, server, action) + module.exit_json(changed=True) + + elif action == 'lock': + # lock doesn't set a state, just do it + cloud.nova_client.servers.lock(server=server.id) + module.exit_json(changed=True) + + elif action == 'unlock': + # unlock doesn't set a state, just do it + cloud.nova_client.servers.unlock(server=server.id) + module.exit_json(changed=True) + + elif action == 'suspend': + if not _system_state_change(action, status): + module.exit_json(changed=False) + + cloud.nova_client.servers.suspend(server=server.id) + if wait: + _wait(timeout, cloud, server, action) + module.exit_json(changed=True) + + elif action == 'resume': + if not _system_state_change(action, status): + module.exit_json(changed=False) + + cloud.nova_client.servers.resume(server=server.id) + if wait: + _wait(timeout, cloud, server, action) + module.exit_json(changed=True) + + except shade.OpenStackCloudException as e: + module.fail_json(msg=e.message, extra_data=e.extra_data) + +# this is magic, see lib/ansible/module_common.py +from ansible.module_utils.basic import * +from ansible.module_utils.openstack import * +if __name__ == '__main__': + main() From 5f5be7bc2a507a4dd1825efc50b3b072e892e2de Mon Sep 17 00:00:00 2001 From: marko Date: Fri, 22 May 2015 12:34:41 +0200 Subject: [PATCH 092/464] Added support for foreign security groups (Fixes: #373) --- 
cloud/amazon/ec2_group.py | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/cloud/amazon/ec2_group.py b/cloud/amazon/ec2_group.py index 6552e5abf67..b85fde9ead3 100644 --- a/cloud/amazon/ec2_group.py +++ b/cloud/amazon/ec2_group.py @@ -90,6 +90,14 @@ EXAMPLES = ''' from_port: 22 to_port: 22 cidr_ip: 10.0.0.0/8 + - proto: tcp + from_port: 443 + to_port: 443 + group_id: amazon-elb/sg-87654321/amazon-elb-sg + - proto: tcp + from_port: 3306 + to_port: 3306 + group_id: 123412341234/sg-87654321/exact-name-of-sg - proto: udp from_port: 10050 to_port: 10050 @@ -113,6 +121,7 @@ EXAMPLES = ''' try: import boto.ec2 + from boto.ec2.securitygroup import SecurityGroup HAS_BOTO = True except ImportError: HAS_BOTO = False @@ -148,6 +157,7 @@ def get_target_from_rule(module, ec2, rule, name, group, groups, vpc_id): group_id or a non-None ip range. """ + FOREIGN_SECURITY_GROUP_REGEX = '^(\S+)/(sg-\S+)/(\S+)' group_id = None group_name = None ip = None @@ -158,6 +168,12 @@ def get_target_from_rule(module, ec2, rule, name, group, groups, vpc_id): module.fail_json(msg="Specify group_name OR cidr_ip, not both") elif 'group_id' in rule and 'group_name' in rule: module.fail_json(msg="Specify group_id OR group_name, not both") + elif 'group_id' in rule and re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']): + # this is a foreign Security Group. Since you can't fetch it you must create an instance of it + owner_id, group_id, group_name = re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']).groups() + group_instance = SecurityGroup(owner_id=owner_id, name=group_name, id=group_id) + groups[group_id] = group_instance + groups[group_name] = group_instance elif 'group_id' in rule: group_id = rule['group_id'] elif 'group_name' in rule: @@ -319,6 +335,11 @@ def main(): for (rule, grant) in groupRules.itervalues() : grantGroup = None if grant.group_id: + if grant.owner_id != group.owner_id: + # this is a foreign Security Group. 
Since you can't fetch it you must create an instance of it + group_instance = SecurityGroup(owner_id=grant.owner_id, name=grant.name, id=grant.group_id) + groups[grant.group_id] = group_instance + groups[grant.name] = group_instance grantGroup = groups[grant.group_id] if not module.check_mode: group.revoke(rule.ip_protocol, rule.from_port, rule.to_port, grant.cidr_ip, grantGroup) From 771b369005ff1a7d9f6fab7cb6892e09cf087931 Mon Sep 17 00:00:00 2001 From: Vladislav Lewin Date: Fri, 22 May 2015 13:26:25 +0200 Subject: [PATCH 093/464] add SLE12 support to hostname.py --- system/hostname.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/system/hostname.py b/system/hostname.py index 1fe16e506f4..b90b0441595 100644 --- a/system/hostname.py +++ b/system/hostname.py @@ -368,6 +368,15 @@ class FedoraHostname(Hostname): distribution = 'Fedora' strategy_class = SystemdStrategy +class SLESHostname(Hostname): + platform = 'Linux' + distribution = 'Suse linux enterprise server ' + distribution_version = get_distribution_version() + if distribution_version and LooseVersion(distribution_version) >= LooseVersion("12"): + strategy_class = SystemdStrategy + else: + strategy_class = UnimplementedStrategy + class OpenSUSEHostname(Hostname): platform = 'Linux' distribution = 'Opensuse ' From 3d1b6285c3b429f59aad4cfc5b5a5eb629779d4b Mon Sep 17 00:00:00 2001 From: Tom Paine Date: Fri, 22 May 2015 18:21:56 +0100 Subject: [PATCH 094/464] Update vsphere_guest.py Clear up confusing action where disk and hardware state can be passed to a reconfigure but is ignored (module only changes CPU and RAM when state is 'reconfigured' and ignore everything else. 
--- cloud/vmware/vsphere_guest.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index a33fd52ea70..175ed35724b 100644 --- a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -65,7 +65,7 @@ options: default: null state: description: - - Indicate desired state of the vm. + - Indicate desired state of the vm. 'reconfigured' only applies changes to 'memory_mb' and 'num_cpus' in vm_hardware parameter, and only when hot-plugging is enabled for the guest. default: present choices: ['present', 'powered_off', 'absent', 'powered_on', 'restarted', 'reconfigured'] from_template: From 6a04f0d45288a9bad860ec28ca5684fe6edee347 Mon Sep 17 00:00:00 2001 From: Tom Paine Date: Fri, 22 May 2015 18:40:35 +0100 Subject: [PATCH 095/464] Update vsphere_guest.py Clarifies that when using a template, CPU, RAM, NIC and Disk params are silently ignored. --- cloud/vmware/vsphere_guest.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index 175ed35724b..20b41942ed6 100644 --- a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -71,7 +71,7 @@ options: from_template: version_added: "1.9" description: - - Specifies if the VM should be deployed from a template (cannot be ran with state) + - Specifies if the VM should be deployed from a template (annot be run with state). Only accepts 'cluster' and 'resource_pool' params. No guest customization changes to hardware such as CPU, RAM, NICs or Disks can be applied this way. default: no choices: ['yes', 'no'] template_src: From 14d7073b517aff1121b907213016490971d49754 Mon Sep 17 00:00:00 2001 From: Tom Paine Date: Fri, 22 May 2015 18:43:20 +0100 Subject: [PATCH 096/464] Update vsphere_guest.py Remove etc. As reconfigure only reconfigures RAM and CPU. 
--- cloud/vmware/vsphere_guest.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index 20b41942ed6..7faee2303d5 100644 --- a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -195,7 +195,7 @@ EXAMPLES = ''' hostname: esx001.mydomain.local # Deploy a guest from a template -# No reconfiguration of the destination guest is done at this stage, a reconfigure would be needed to adjust memory/cpu etc.. +# No reconfiguration of the destination guest is done at this stage, a reconfigure is needed to adjust RAM/CPU. - vsphere_guest: vcenter_hostname: vcenter.mydomain.local username: myuser From 0ec4c43931c200dc289798a5efea494c8b5dba37 Mon Sep 17 00:00:00 2001 From: Tom Paine Date: Fri, 22 May 2015 19:05:25 +0100 Subject: [PATCH 097/464] Update vsphere_guest.py Spelling mistake. --- cloud/vmware/vsphere_guest.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index 7faee2303d5..93fb8961f56 100644 --- a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -71,7 +71,7 @@ options: from_template: version_added: "1.9" description: - - Specifies if the VM should be deployed from a template (annot be run with state). Only accepts 'cluster' and 'resource_pool' params. No guest customization changes to hardware such as CPU, RAM, NICs or Disks can be applied this way. + - Specifies if the VM should be deployed from a template (cannot be run with state). Only accepts 'cluster' and 'resource_pool' params. No guest customization changes to hardware such as CPU, RAM, NICs or Disks can be applied this way. default: no choices: ['yes', 'no'] template_src: From 7325d6a8ef43e5dee9bbffab71ebfa4323895ab8 Mon Sep 17 00:00:00 2001 From: Tom Paine Date: Fri, 22 May 2015 19:07:28 +0100 Subject: [PATCH 098/464] Update vsphere_guest.py Remove redundant description. 
--- cloud/vmware/vsphere_guest.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index 93fb8961f56..8e1aa686701 100644 --- a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -71,7 +71,7 @@ options: from_template: version_added: "1.9" description: - - Specifies if the VM should be deployed from a template (cannot be run with state). Only accepts 'cluster' and 'resource_pool' params. No guest customization changes to hardware such as CPU, RAM, NICs or Disks can be applied this way. + - Specifies if the VM should be deployed from a template (cannot be run with state). No guest customization changes to hardware such as CPU, RAM, NICs or Disks can be applied when launching from template. default: no choices: ['yes', 'no'] template_src: From b5399d34464475a9aa87c6b928628cd262022cd5 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 22 May 2015 13:25:05 -0500 Subject: [PATCH 099/464] Re-reverting windows modules PR #384 --- windows/win_copy.ps1 | 84 ++++++++++++++++++++++++++++++++ windows/win_copy.py | 60 +++++++++++++++++++++++ windows/win_file.ps1 | 105 ++++++++++++++++++++++++++++++++++++++++ windows/win_file.py | 73 ++++++++++++++++++++++++++++ windows/win_stat.ps1 | 6 +-- windows/win_template.py | 52 ++++++++++++++++++++ 6 files changed, 376 insertions(+), 4 deletions(-) create mode 100644 windows/win_copy.ps1 create mode 100644 windows/win_copy.py create mode 100644 windows/win_file.ps1 create mode 100644 windows/win_file.py create mode 100644 windows/win_template.py diff --git a/windows/win_copy.ps1 b/windows/win_copy.ps1 new file mode 100644 index 00000000000..9ffdab85f03 --- /dev/null +++ b/windows/win_copy.ps1 @@ -0,0 +1,84 @@ +#!powershell +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either 
version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# WANT_JSON +# POWERSHELL_COMMON + +$params = Parse-Args $args; + +$src= Get-Attr $params "src" $FALSE; +If ($src -eq $FALSE) +{ + Fail-Json (New-Object psobject) "missing required argument: src"; +} + +$dest= Get-Attr $params "dest" $FALSE; +If ($dest -eq $FALSE) +{ + Fail-Json (New-Object psobject) "missing required argument: dest"; +} + +# seems to be supplied by the calling environment, but +# probably shouldn't be a test for it existing in the params. +# TODO investigate. +$original_basename = Get-Attr $params "original_basename" $FALSE; +If ($original_basename -eq $FALSE) +{ + Fail-Json (New-Object psobject) "missing required argument: original_basename "; +} + +$result = New-Object psobject @{ + changed = $FALSE +}; + +# if $dest is a dir, append $original_basename so the file gets copied with its intended name. +if (Test-Path $dest -PathType Container) +{ + $dest = Join-Path $dest $original_basename; +} + +If (Test-Path $dest) +{ + $dest_checksum = Get-FileChecksum ($dest); + $src_checksum = Get-FileChecksum ($src); + + If (! 
$src_checksum.CompareTo($dest_checksum)) + { + # New-Item -Force creates subdirs for recursive copies + New-Item -Force $dest -Type file; + Copy-Item -Path $src -Destination $dest -Force; + } + $dest_checksum = Get-FileChecksum ($dest); + If ( $src_checksum.CompareTo($dest_checksum)) + { + $result.changed = $TRUE; + } + Else + { + Fail-Json (New-Object psobject) "Failed to place file"; + } +} +Else +{ + New-Item -Force $dest -Type file; + Copy-Item -Path $src -Destination $dest; + $result.changed = $TRUE; +} + +$dest_checksum = Get-FileChecksum($dest); +$result.checksum = $dest_checksum; + +Exit-Json $result; diff --git a/windows/win_copy.py b/windows/win_copy.py new file mode 100644 index 00000000000..7d0b49e5985 --- /dev/null +++ b/windows/win_copy.py @@ -0,0 +1,60 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2012, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +import os +import time + +DOCUMENTATION = ''' +--- +module: win_copy +version_added: "1.8" +short_description: Copies files to remote locations on windows hosts. +description: + - The M(win_copy) module copies a file on the local box to remote windows locations. +options: + src: + description: + - Local path to a file to copy to the remote server; can be absolute or relative. + If path is a directory, it is copied recursively. 
In this case, if path ends + with "/", only inside contents of that directory are copied to destination. + Otherwise, if it does not end with "/", the directory itself with all contents + is copied. This behavior is similar to Rsync. + required: false + default: null + aliases: [] + dest: + description: + - Remote absolute path where the file should be copied to. If src is a directory, + this must be a directory too. Use \\ for path separators. + required: true + default: null +author: Michael DeHaan +notes: + - The "win_copy" module recursively copy facility does not scale to lots (>hundreds) of files. + Instead, you may find it better to create files locally, perhaps using win_template, and + then use win_get_url to put them in the correct location. +''' + +EXAMPLES = ''' +# Example from Ansible Playbooks +- win_copy: src=/srv/myfiles/foo.conf dest=c:\\TEMP\\foo.conf + +''' + diff --git a/windows/win_file.ps1 b/windows/win_file.ps1 new file mode 100644 index 00000000000..62ac81fc1ee --- /dev/null +++ b/windows/win_file.ps1 @@ -0,0 +1,105 @@ +#!powershell +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +# WANT_JSON +# POWERSHELL_COMMON + +$params = Parse-Args $args; + +# path +$path = Get-Attr $params "path" $FALSE; +If ($path -eq $FALSE) +{ + $path = Get-Attr $params "dest" $FALSE; + If ($path -eq $FALSE) + { + $path = Get-Attr $params "name" $FALSE; + If ($path -eq $FALSE) + { + Fail-Json (New-Object psobject) "missing required argument: path"; + } + } +} + +# JH Following advice from Chris Church, only allow the following states +# in the windows version for now: +# state - file, directory, touch, absent +# (originally was: state - file, link, directory, hard, touch, absent) + +$state = Get-Attr $params "state" "file"; + +#$recurse = Get-Attr $params "recurse" "no"; + +# force - yes, no +# $force = Get-Attr $params "force" "no"; + +# result +$result = New-Object psobject @{ + changed = $FALSE +}; + +If ( $state -eq "touch" ) +{ + If(Test-Path $path) + { + (Get-ChildItem $path).LastWriteTime = Get-Date + } + Else + { + echo $null > $file + } + $result.changed = $TRUE; +} + +If (Test-Path $path) +{ + $fileinfo = Get-Item $path; + If ( $state -eq "absent" ) + { + Remove-Item -Recurse -Force $fileinfo; + $result.changed = $TRUE; + } + Else + { + # Only files have the .Directory attribute. + If ( $state -eq "directory" -and $fileinfo.Directory ) + { + Fail-Json (New-Object psobject) "path is not a directory"; + } + + # Only files have the .Directory attribute. 
+ If ( $state -eq "file" -and -not $fileinfo.Directory ) + { + Fail-Json (New-Object psobject) "path is not a file"; + } + + } +} +Else +{ + If ( $state -eq "directory" ) + { + New-Item -ItemType directory -Path $path + $result.changed = $TRUE; + } + + If ( $state -eq "file" ) + { + Fail-Json (New-Object psobject) "path will not be created"; + } +} + +Exit-Json $result; diff --git a/windows/win_file.py b/windows/win_file.py new file mode 100644 index 00000000000..6a218216617 --- /dev/null +++ b/windows/win_file.py @@ -0,0 +1,73 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2012, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + + +DOCUMENTATION = ''' +--- +module: win_file +version_added: "1.8" +short_description: Creates, touches or removes files or directories. +extends_documentation_fragment: files +description: + - Creates (empty) files, updates file modification stamps of existing files, + and can create or remove directories. + Unlike M(file), does not modify ownership, permissions or manipulate links. +notes: + - See also M(win_copy), M(win_template), M(copy), M(template), M(assemble) +requirements: [ ] +author: Michael DeHaan +options: + path: + description: + - 'path to the file being managed. 
Aliases: I(dest), I(name)' + required: true + default: [] + aliases: ['dest', 'name'] + state: + description: + - If C(directory), all immediate subdirectories will be created if they + do not exist. + If C(file), the file will NOT be created if it does not exist, see the M(copy) + or M(template) module if you want that behavior. If C(absent), + directories will be recursively deleted, and files will be removed. + If C(touch), an empty file will be created if the c(path) does not + exist, while an existing file or directory will receive updated file access and + modification times (similar to the way `touch` works from the command line). + required: false + default: file + choices: [ file, directory, touch, absent ] +''' + +EXAMPLES = ''' +# create a file +- win_file: path=C:\\temp\\foo.conf + +# touch a file (creates if not present, updates modification time if present) +- win_file: path=C:\\temp\\foo.conf state=touch + +# remove a file, if present +- win_file: path=C:\\temp\\foo.conf state=absent + +# create directory structure +- win_file: path=C:\\temp\\folder\\subfolder state=directory + +# remove directory structure +- win_file: path=C:\\temp state=absent +''' diff --git a/windows/win_stat.ps1 b/windows/win_stat.ps1 index 4e4c55b2aa3..10101a62b30 100644 --- a/windows/win_stat.ps1 +++ b/windows/win_stat.ps1 @@ -53,11 +53,9 @@ Else If ($get_md5 -and $result.stat.exists -and -not $result.stat.isdir) { - $sp = new-object -TypeName System.Security.Cryptography.MD5CryptoServiceProvider; - $fp = [System.IO.File]::Open($path, [System.IO.Filemode]::Open, [System.IO.FileAccess]::Read); - $hash = [System.BitConverter]::ToString($sp.ComputeHash($fp)).Replace("-", "").ToLower(); - $fp.Dispose(); + $hash = Get-FileChecksum($path); Set-Attr $result.stat "md5" $hash; + Set-Attr $result.stat "checksum" $hash; } Exit-Json $result; diff --git a/windows/win_template.py b/windows/win_template.py new file mode 100644 index 00000000000..402702f93b2 --- /dev/null +++ 
b/windows/win_template.py @@ -0,0 +1,52 @@ +# this is a virtual module that is entirely implemented server side + +DOCUMENTATION = ''' +--- +module: win_template +version_added: 1.8 +short_description: Templates a file out to a remote server. +description: + - Templates are processed by the Jinja2 templating language + (U(http://jinja.pocoo.org/docs/)) - documentation on the template + formatting can be found in the Template Designer Documentation + (U(http://jinja.pocoo.org/docs/templates/)). + - "Six additional variables can be used in templates: C(ansible_managed) + (configurable via the C(defaults) section of C(ansible.cfg)) contains a string + which can be used to describe the template name, host, modification time of the + template file and the owner uid, C(template_host) contains the node name of + the template's machine, C(template_uid) the owner, C(template_path) the + absolute path of the template, C(template_fullpath) is the absolute path of the + template, and C(template_run_date) is the date that the template was rendered. Note that including + a string that uses a date in the template will result in the template being marked 'changed' + each time." +options: + src: + description: + - Path of a Jinja2 formatted template on the local server. This can be a relative or absolute path. + required: true + default: null + aliases: [] + dest: + description: + - Location to render the template to on the remote machine. + required: true + default: null + backup: + description: + - Create a backup file including the timestamp information so you can get + the original file back if you somehow clobbered it incorrectly. + required: false + choices: [ "yes", "no" ] + default: "no" +notes: + - "templates are loaded with C(trim_blocks=True)." 
+requirements: [] +author: Michael DeHaan +''' + +EXAMPLES = ''' +# Example +- win_template: src=/mytemplates/foo.j2 dest=C:\\temp\\file.conf + + +''' From 3f679933a6695f91d24d0ed02b52f9caab2d4e5d Mon Sep 17 00:00:00 2001 From: Ryan Walls Date: Fri, 22 May 2015 16:36:38 -0600 Subject: [PATCH 100/464] Add support for --log-driver option that docker released with Docker 1.6 --- cloud/docker/docker.py | 112 +++++++++++++++++++++++++++-------------- 1 file changed, 74 insertions(+), 38 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index cb6d3dae075..e4c27797b71 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -92,6 +92,21 @@ options: - 'alias. Use docker CLI-style syntax: C(redis:myredis).' default: null version_added: "1.5" + log_driver: + description: + - You can specify a different logging driver for the container than for the daemon. + "json-file" Default logging driver for Docker. Writes JSON messages to file. + docker logs command is available only for this logging driver. + "none" disables any logging for the container. docker logs won't be available with this driver. + "syslog" Syslog logging driver for Docker. Writes log messages to syslog. + docker logs command is not available for this logging driver. + Requires docker >= 1.6.0. 
+ required: false + default: json-file + choices: + - json-file + - none + - syslog memory_limit: description: - RAM allocated to the container as a number of bytes or as a human-readable @@ -506,6 +521,7 @@ class DockerManager(object): 'restart_policy': ((0, 5, 0), '1.14'), 'extra_hosts': ((0, 7, 0), '1.3.1'), 'pid': ((1, 0, 0), '1.17'), + 'log_driver': ((1, 2, 0), '1.18'), # Clientside only 'insecure_registry': ((0, 5, 0), '0.0') } @@ -1110,6 +1126,15 @@ class DockerManager(object): self.reload_reasons.append('volumes_from ({0} => {1})'.format(actual_volumes_from, expected_volumes_from)) differing.append(container) + # LOG_DRIVER + + expected_log_driver = set(self.module.params.get('log_driver') or []) + actual_log_driver = set(container['HostConfig']['LogConfig'] or []) + if actual_log_driver != expected_log_driver: + self.reload_reasons.append('log_driver ({0} => {1})'.format(actual_log_driver, expected_log_driver)) + differing.append(container) + continue + return differing def get_deployed_containers(self): @@ -1206,6 +1231,52 @@ class DockerManager(object): except Exception as e: self.module.fail_json(msg="Failed to pull the specified image: %s" % resource, error=repr(e)) + def create_host_config(self): + params = { + 'lxc_conf': self.lxc_conf, + 'binds': self.binds, + 'port_bindings': self.port_bindings, + 'publish_all_ports': self.module.params.get('publish_all_ports'), + 'privileged': self.module.params.get('privileged'), + 'links': self.links, + 'network_mode': self.module.params.get('net'), + } + + optionals = {} + for optional_param in ('dns', 'volumes_from', 'restart_policy', + 'restart_policy_retry', 'pid', 'extra_hosts', 'log_driver'): + optionals[optional_param] = self.module.params.get(optional_param) + + if optionals['dns'] is not None: + self.ensure_capability('dns') + params['dns'] = optionals['dns'] + + if optionals['volumes_from'] is not None: + self.ensure_capability('volumes_from') + params['volumes_from'] = optionals['volumes_from'] + + if 
optionals['restart_policy'] is not None: + self.ensure_capability('restart_policy') + params['restart_policy'] = { 'Name': optionals['restart_policy'] } + if params['restart_policy']['Name'] == 'on-failure': + params['restart_policy']['MaximumRetryCount'] = optionals['restart_policy_retry'] + + if optionals['pid'] is not None: + self.ensure_capability('pid') + params['pid_mode'] = optionals['pid'] + + if optionals['extra_hosts'] is not None: + self.ensure_capability('extra_hosts') + params['extra_hosts'] = optionals['extra_hosts'] + + if optionals['log_driver'] is not None: + self.ensure_capability('log_driver') + log_config = docker.utils.LogConfig(type=docker.utils.LogConfig.types.JSON) + log_config.type = optionals['log_driver'] + params['log_config'] = log_config + + return docker.utils.create_host_config(**params) + def create_containers(self, count=1): try: mem_limit = _human_to_bytes(self.module.params.get('memory_limit')) @@ -1224,6 +1295,7 @@ class DockerManager(object): 'name': self.module.params.get('name'), 'stdin_open': self.module.params.get('stdin_open'), 'tty': self.module.params.get('tty'), + 'host_config': self.create_host_config(), } def do_create(count, params): @@ -1244,45 +1316,8 @@ class DockerManager(object): return containers def start_containers(self, containers): - params = { - 'lxc_conf': self.lxc_conf, - 'binds': self.binds, - 'port_bindings': self.port_bindings, - 'publish_all_ports': self.module.params.get('publish_all_ports'), - 'privileged': self.module.params.get('privileged'), - 'links': self.links, - 'network_mode': self.module.params.get('net'), - } - - optionals = {} - for optional_param in ('dns', 'volumes_from', 'restart_policy', - 'restart_policy_retry', 'pid', 'extra_hosts'): - optionals[optional_param] = self.module.params.get(optional_param) - - if optionals['dns'] is not None: - self.ensure_capability('dns') - params['dns'] = optionals['dns'] - - if optionals['volumes_from'] is not None: - 
self.ensure_capability('volumes_from') - params['volumes_from'] = optionals['volumes_from'] - - if optionals['restart_policy'] is not None: - self.ensure_capability('restart_policy') - params['restart_policy'] = { 'Name': optionals['restart_policy'] } - if params['restart_policy']['Name'] == 'on-failure': - params['restart_policy']['MaximumRetryCount'] = optionals['restart_policy_retry'] - - if optionals['pid'] is not None: - self.ensure_capability('pid') - params['pid_mode'] = optionals['pid'] - - if optionals['extra_hosts'] is not None: - self.ensure_capability('extra_hosts') - params['extra_hosts'] = optionals['extra_hosts'] - for i in containers: - self.client.start(i['Id'], **params) + self.client.start(i) self.increment_counter('started') def stop_containers(self, containers): @@ -1475,6 +1510,7 @@ def main(): net = dict(default=None), pid = dict(default=None), insecure_registry = dict(default=False, type='bool'), + log_driver = dict(default='json-file', choices=['json-file', 'none', 'syslog']), ), required_together = ( ['tls_client_cert', 'tls_client_key'], From 442b6a206722df398a6611b3575f7f9842f5d52b Mon Sep 17 00:00:00 2001 From: Jonathan Mainguy Date: Fri, 22 May 2015 21:28:36 -0400 Subject: [PATCH 101/464] fixes check mode for Ubuntu 14.04 --- system/service.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/system/service.py b/system/service.py index 42b9a7762f3..14c0026d717 100644 --- a/system/service.py +++ b/system/service.py @@ -765,6 +765,9 @@ class LinuxService(Service): else: action = 'disable' + if self.module.check_mode: + rc = 0 + return (rc, out, err) = self.execute_command("%s %s %s" % (self.enable_cmd, self.name, action)) if rc != 0: if err: From 27c174128b054e8217a9a0a5d0e454e7f29546aa Mon Sep 17 00:00:00 2001 From: Andrew Pashkin Date: Sat, 23 May 2015 23:17:30 +0300 Subject: [PATCH 102/464] Add 'docker_api_version' to docker_image By default docker-py uses latest version of Docker API. 
This is not always desireable, and this patch adds option to specify version, that should be used. --- cloud/docker/docker_image.py | 38 +++++++++++++++++++++++++++--------- 1 file changed, 29 insertions(+), 9 deletions(-) diff --git a/cloud/docker/docker_image.py b/cloud/docker/docker_image.py index faf47cd6e09..10f63a987c5 100644 --- a/cloud/docker/docker_image.py +++ b/cloud/docker/docker_image.py @@ -65,6 +65,12 @@ options: required: false default: unix://var/run/docker.sock aliases: [] + docker_api_version: + description: + - Remote API version to use. This defaults to the current default as + specified by docker-py. + default: docker-py default remote API version + version_added: "2.0" state: description: - Set the state of the image @@ -137,6 +143,14 @@ if HAS_DOCKER_CLIENT: except ImportError: from docker.client import APIError as DockerAPIError + try: + # docker-py 1.2+ + import docker.constants + DEFAULT_DOCKER_API_VERSION = docker.constants.DEFAULT_DOCKER_API_VERSION + except (ImportError, AttributeError): + # docker-py less than 1.2 + DEFAULT_DOCKER_API_VERSION = docker.client.DEFAULT_DOCKER_API_VERSION + class DockerImageManager: def __init__(self, module): @@ -147,7 +161,10 @@ class DockerImageManager: self.tag = self.module.params.get('tag') self.nocache = self.module.params.get('nocache') docker_url = urlparse(module.params.get('docker_url')) - self.client = docker.Client(base_url=docker_url.geturl(), timeout=module.params.get('timeout')) + self.client = docker.Client( + base_url=docker_url.geturl(), + version=module.params.get('docker_api_version'), + timeout=module.params.get('timeout')) self.changed = False self.log = [] self.error_msg = None @@ -220,14 +237,17 @@ class DockerImageManager: def main(): module = AnsibleModule( argument_spec = dict( - path = dict(required=False, default=None), - dockerfile = dict(required=False, default="Dockerfile"), - name = dict(required=True), - tag = dict(required=False, default="latest"), - nocache = 
dict(default=False, type='bool'), - state = dict(default='present', choices=['absent', 'present', 'build']), - docker_url = dict(default='unix://var/run/docker.sock'), - timeout = dict(default=600, type='int'), + path = dict(required=False, default=None), + dockerfile = dict(required=False, default="Dockerfile"), + name = dict(required=True), + tag = dict(required=False, default="latest"), + nocache = dict(default=False, type='bool'), + state = dict(default='present', choices=['absent', 'present', 'build']), + docker_url = dict(default='unix://var/run/docker.sock'), + docker_api_version = dict(required=False, + default=DEFAULT_DOCKER_API_VERSION, + type='str'), + timeout = dict(default=600, type='int'), ) ) if not HAS_DOCKER_CLIENT: From 31f3319b5650a2ca0d4ec50e99df355b8ab3de5a Mon Sep 17 00:00:00 2001 From: Wang Qiang Date: Mon, 25 May 2015 09:25:11 +0800 Subject: [PATCH 103/464] Do instance update after add tags to instance. --- cloud/amazon/ec2.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index ca0e3d34bc6..019e4902fce 100644 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -1025,6 +1025,7 @@ def create_instances(module, ec2, vpc, override_count=None): instance_dict_array = [] created_instance_ids = [] for inst in running_instances: + inst.update() d = get_instance_info(inst) created_instance_ids.append(inst.id) instance_dict_array.append(d) From 1e2ce363f754e34ee0968897cf26d4de1fa1fd4b Mon Sep 17 00:00:00 2001 From: fdupoux Date: Mon, 25 May 2015 13:40:15 +0100 Subject: [PATCH 104/464] Decompress mysql dumps on the fly using python subprocess during an import to simplify operation --- database/mysql/mysql_db.py | 88 +++++++++++++++++--------------------- 1 file changed, 40 insertions(+), 48 deletions(-) diff --git a/database/mysql/mysql_db.py b/database/mysql/mysql_db.py index 16ddf93e7a5..b28ef219f63 100644 --- a/database/mysql/mysql_db.py +++ b/database/mysql/mysql_db.py @@ -111,6 +111,7 @@ import 
ConfigParser import os import pipes import stat +import subprocess try: import MySQLdb except ImportError: @@ -166,56 +167,47 @@ def db_import(module, host, user, password, db_name, target, all_databases, port if not all_databases: cmd += " -D %s" % pipes.quote(db_name) if os.path.splitext(target)[-1] == '.gz': - gzip_path = module.get_bin_path('gzip') - if not gzip_path: - module.fail_json(msg="gzip command not found") - #gzip -d file (uncompress) - rc, stdout, stderr = module.run_command('%s -d %s' % (gzip_path, target)) - if rc != 0: - return rc, stdout, stderr - #Import sql - cmd += " < %s" % pipes.quote(os.path.splitext(target)[0]) - try: - rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True) - if rc != 0: - return rc, stdout, stderr - finally: - #gzip file back up - module.run_command('%s %s' % (gzip_path, os.path.splitext(target)[0])) + zcat_path = module.get_bin_path('zcat') + if not zcat_path: + module.fail_json(msg="zcat command not found") + p1 = subprocess.Popen([zcat_path, target], stdout=subprocess.PIPE, stderr=subprocess.PIPE) + p2 = subprocess.Popen(cmd.split(' '), stdin=p1.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + (stdout2, stderr2) = p2.communicate() + p1.stdout.close() + p1.wait() + if p1.returncode != 0: + stderr1 = p1.stderr.read() + return p1.returncode, '', stderr1 + else: + return p2.returncode, stdout2, stderr2 elif os.path.splitext(target)[-1] == '.bz2': - bzip2_path = module.get_bin_path('bzip2') - if not bzip2_path: - module.fail_json(msg="bzip2 command not found") - #bzip2 -d file (uncompress) - rc, stdout, stderr = module.run_command('%s -d %s' % (bzip2_path, target)) - if rc != 0: - return rc, stdout, stderr - #Import sql - cmd += " < %s" % pipes.quote(os.path.splitext(target)[0]) - try: - rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True) - if rc != 0: - return rc, stdout, stderr - finally: - #bzip2 file back up - rc, stdout, stderr = module.run_command('%s %s' % (bzip2_path, 
os.path.splitext(target)[0])) + bzcat_path = module.get_bin_path('bzcat') + if not bzcat_path: + module.fail_json(msg="bzcat command not found") + p1 = subprocess.Popen([bzcat_path, target], stdout=subprocess.PIPE, stderr=subprocess.PIPE) + p2 = subprocess.Popen(cmd.split(' '), stdin=p1.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + (stdout2, stderr2) = p2.communicate() + p1.stdout.close() + p1.wait() + if p1.returncode != 0: + stderr1 = p1.stderr.read() + return p1.returncode, '', stderr1 + else: + return p2.returncode, stdout2, stderr2 elif os.path.splitext(target)[-1] == '.xz': - xz_path = module.get_bin_path('xz') - if not xz_path: - module.fail_json(msg="xz command not found") - #xz -d file (uncompress) - rc, stdout, stderr = module.run_command('%s -d %s' % (xz_path, target)) - if rc != 0: - return rc, stdout, stderr - #Import sql - cmd += " < %s" % pipes.quote(os.path.splitext(target)[0]) - try: - rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True) - if rc != 0: - return rc, stdout, stderr - finally: - #xz file back up - rc, stdout, stderr = module.run_command('%s %s' % (xz_path, os.path.splitext(target)[0])) + xzcat_path = module.get_bin_path('xzcat') + if not xzcat_path: + module.fail_json(msg="xzcat command not found") + p1 = subprocess.Popen([xzcat_path, target], stdout=subprocess.PIPE, stderr=subprocess.PIPE) + p2 = subprocess.Popen(cmd.split(' '), stdin=p1.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + (stdout2, stderr2) = p2.communicate() + p1.stdout.close() + p1.wait() + if p1.returncode != 0: + stderr1 = p1.stderr.read() + return p1.returncode, '', stderr1 + else: + return p2.returncode, stdout2, stderr2 else: cmd += " < %s" % pipes.quote(target) rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True) From 32e609720a962fa948094de03eba4750ab03918b Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 25 May 2015 09:22:08 -0700 Subject: [PATCH 105/464] Refactor dump compression and use get_bin_path 
for finding the compressors --- database/mysql/mysql_db.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/database/mysql/mysql_db.py b/database/mysql/mysql_db.py index 16ddf93e7a5..71dfc3a1ad3 100644 --- a/database/mysql/mysql_db.py +++ b/database/mysql/mysql_db.py @@ -142,14 +142,20 @@ def db_dump(module, host, user, password, db_name, target, all_databases, port, cmd += " --all-databases" else: cmd += " %s" % pipes.quote(db_name) + + path = None if os.path.splitext(target)[-1] == '.gz': - cmd = cmd + ' | gzip > ' + pipes.quote(target) + path = module.get_bin_path('gzip', True) elif os.path.splitext(target)[-1] == '.bz2': - cmd = cmd + ' | bzip2 > ' + pipes.quote(target) + path = module.get_bin_path('bzip2', True) elif os.path.splitext(target)[-1] == '.xz': - cmd = cmd + ' | xz > ' + pipes.quote(target) + path = module.get_bin_path('xz', True) + + if path: + cmd = '%s | %s > %s' % (cmd, path, pipes.quote(target)) else: cmd += " > %s" % pipes.quote(target) + rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True) return rc, stdout, stderr From f634c10636428c78575aa4d54b19dfd53e812dae Mon Sep 17 00:00:00 2001 From: fdupoux Date: Mon, 25 May 2015 20:10:12 +0100 Subject: [PATCH 106/464] Simplify code which prepares the decompression command --- database/mysql/mysql_db.py | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/database/mysql/mysql_db.py b/database/mysql/mysql_db.py index b28ef219f63..b5af48408fd 100644 --- a/database/mysql/mysql_db.py +++ b/database/mysql/mysql_db.py @@ -167,10 +167,8 @@ def db_import(module, host, user, password, db_name, target, all_databases, port if not all_databases: cmd += " -D %s" % pipes.quote(db_name) if os.path.splitext(target)[-1] == '.gz': - zcat_path = module.get_bin_path('zcat') - if not zcat_path: - module.fail_json(msg="zcat command not found") - p1 = subprocess.Popen([zcat_path, target], stdout=subprocess.PIPE, stderr=subprocess.PIPE) + 
comp_prog_path = module.get_bin_path('gzip', required=True) + p1 = subprocess.Popen([comp_prog_path, '-dc', target], stdout=subprocess.PIPE, stderr=subprocess.PIPE) p2 = subprocess.Popen(cmd.split(' '), stdin=p1.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (stdout2, stderr2) = p2.communicate() p1.stdout.close() @@ -181,10 +179,8 @@ def db_import(module, host, user, password, db_name, target, all_databases, port else: return p2.returncode, stdout2, stderr2 elif os.path.splitext(target)[-1] == '.bz2': - bzcat_path = module.get_bin_path('bzcat') - if not bzcat_path: - module.fail_json(msg="bzcat command not found") - p1 = subprocess.Popen([bzcat_path, target], stdout=subprocess.PIPE, stderr=subprocess.PIPE) + comp_prog_path = module.get_bin_path('bzip2', required=True) + p1 = subprocess.Popen([comp_prog_path, '-dc', target], stdout=subprocess.PIPE, stderr=subprocess.PIPE) p2 = subprocess.Popen(cmd.split(' '), stdin=p1.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (stdout2, stderr2) = p2.communicate() p1.stdout.close() @@ -195,10 +191,8 @@ def db_import(module, host, user, password, db_name, target, all_databases, port else: return p2.returncode, stdout2, stderr2 elif os.path.splitext(target)[-1] == '.xz': - xzcat_path = module.get_bin_path('xzcat') - if not xzcat_path: - module.fail_json(msg="xzcat command not found") - p1 = subprocess.Popen([xzcat_path, target], stdout=subprocess.PIPE, stderr=subprocess.PIPE) + comp_prog_path = module.get_bin_path('xz', required=True) + p1 = subprocess.Popen([comp_prog_path, '-dc', target], stdout=subprocess.PIPE, stderr=subprocess.PIPE) p2 = subprocess.Popen(cmd.split(' '), stdin=p1.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (stdout2, stderr2) = p2.communicate() p1.stdout.close() From 380b122d5a1ec2928b2ec10eb67c1908038fb7ae Mon Sep 17 00:00:00 2001 From: fdupoux Date: Mon, 25 May 2015 20:22:49 +0100 Subject: [PATCH 107/464] Refactoring to avoid duplication of code which manages the decompression of 
database dumps in various formats --- database/mysql/mysql_db.py | 26 +++++--------------------- 1 file changed, 5 insertions(+), 21 deletions(-) diff --git a/database/mysql/mysql_db.py b/database/mysql/mysql_db.py index b5af48408fd..fd6f82b4a5f 100644 --- a/database/mysql/mysql_db.py +++ b/database/mysql/mysql_db.py @@ -166,32 +166,16 @@ def db_import(module, host, user, password, db_name, target, all_databases, port cmd += " --host=%s --port=%i" % (pipes.quote(host), port) if not all_databases: cmd += " -D %s" % pipes.quote(db_name) + + comp_prog_path = None if os.path.splitext(target)[-1] == '.gz': comp_prog_path = module.get_bin_path('gzip', required=True) - p1 = subprocess.Popen([comp_prog_path, '-dc', target], stdout=subprocess.PIPE, stderr=subprocess.PIPE) - p2 = subprocess.Popen(cmd.split(' '), stdin=p1.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - (stdout2, stderr2) = p2.communicate() - p1.stdout.close() - p1.wait() - if p1.returncode != 0: - stderr1 = p1.stderr.read() - return p1.returncode, '', stderr1 - else: - return p2.returncode, stdout2, stderr2 elif os.path.splitext(target)[-1] == '.bz2': comp_prog_path = module.get_bin_path('bzip2', required=True) - p1 = subprocess.Popen([comp_prog_path, '-dc', target], stdout=subprocess.PIPE, stderr=subprocess.PIPE) - p2 = subprocess.Popen(cmd.split(' '), stdin=p1.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - (stdout2, stderr2) = p2.communicate() - p1.stdout.close() - p1.wait() - if p1.returncode != 0: - stderr1 = p1.stderr.read() - return p1.returncode, '', stderr1 - else: - return p2.returncode, stdout2, stderr2 elif os.path.splitext(target)[-1] == '.xz': comp_prog_path = module.get_bin_path('xz', required=True) + + if comp_prog_path: p1 = subprocess.Popen([comp_prog_path, '-dc', target], stdout=subprocess.PIPE, stderr=subprocess.PIPE) p2 = subprocess.Popen(cmd.split(' '), stdin=p1.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (stdout2, stderr2) = p2.communicate() @@ -205,7 
+189,7 @@ def db_import(module, host, user, password, db_name, target, all_databases, port else: cmd += " < %s" % pipes.quote(target) rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True) - return rc, stdout, stderr + return rc, stdout, stderr def db_create(cursor, db, encoding, collation): query_params = dict(enc=encoding, collate=collation) From 71ad56b15136440a21db234b118c3b984f196dda Mon Sep 17 00:00:00 2001 From: whiter Date: Fri, 15 May 2015 15:35:23 +1000 Subject: [PATCH 108/464] Fix for issue #1332 - when instance is has finished deleting and get_db_instance returns None, exit gracefully --- cloud/amazon/rds.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cloud/amazon/rds.py b/cloud/amazon/rds.py index 34f45218f0a..5b152ace6b2 100644 --- a/cloud/amazon/rds.py +++ b/cloud/amazon/rds.py @@ -624,6 +624,8 @@ def await_resource(conn, resource, status, module): if resource.name is None: module.fail_json(msg="Problem with instance %s" % resource.instance) resource = conn.get_db_instance(resource.name) + if resource is None: + break return resource From 8892aa2bf430f25be124b655cc12c975cd1bfbdc Mon Sep 17 00:00:00 2001 From: Johann Schmitz Date: Tue, 26 May 2015 16:13:40 +0200 Subject: [PATCH 109/464] Improved output of mysql_db plugin Show error number and error description on connect error to ease debugging. 
--- database/mysql/mysql_db.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/database/mysql/mysql_db.py b/database/mysql/mysql_db.py index 05db6beb07f..b9d862b56ac 100644 --- a/database/mysql/mysql_db.py +++ b/database/mysql/mysql_db.py @@ -346,11 +346,11 @@ def main(): db_connection = MySQLdb.connect(host=module.params["login_host"], port=login_port, user=login_user, passwd=login_password, db=connect_to_db) cursor = db_connection.cursor() except Exception, e: + errno, errstr = e.args if "Unknown database" in str(e): - errno, errstr = e.args module.fail_json(msg="ERROR: %s %s" % (errno, errstr)) else: - module.fail_json(msg="unable to connect, check login credentials (login_user, and login_password, which can be defined in ~/.my.cnf), check that mysql socket exists and mysql server is running") + module.fail_json(msg="unable to connect, check login credentials (login_user, and login_password, which can be defined in ~/.my.cnf), check that mysql socket exists and mysql server is running (ERROR: %s %s)" % (errno, errstr)) changed = False if db_exists(cursor, db): From 7810a898e8ecd24011d19f5e6c48514342ec0905 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 26 May 2015 09:57:38 -0400 Subject: [PATCH 110/464] sleep when only doing a time delay to avoid cpu churn --- utilities/logic/wait_for.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/utilities/logic/wait_for.py b/utilities/logic/wait_for.py index 1f549570516..4aa5bc78281 100644 --- a/utilities/logic/wait_for.py +++ b/utilities/logic/wait_for.py @@ -337,12 +337,15 @@ def main(): if params['exclude_hosts'] is not None and state != 'drained': module.fail_json(msg="exclude_hosts should only be with state=drained") + start = datetime.datetime.now() if delay: time.sleep(delay) - if state in [ 'stopped', 'absent' ]: + if not port and not path and state != 'drained': + time.sleep(timeout) + elif state in [ 'stopped', 'absent' ]: ### first wait for the stop condition 
end = start + datetime.timedelta(seconds=timeout) @@ -365,6 +368,8 @@ def main(): time.sleep(1) except: break + else: + time.sleep(1) else: elapsed = datetime.datetime.now() - start if port: @@ -427,6 +432,8 @@ def main(): except: time.sleep(1) pass + else: + time.sleep(1) else: elapsed = datetime.datetime.now() - start if port: From d02ef57815f8f047f3ad4e6f382890fcaf81f134 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 26 May 2015 13:43:29 -0400 Subject: [PATCH 111/464] minor doc fix --- system/service.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/service.py b/system/service.py index 41c00ea9171..856361ac956 100644 --- a/system/service.py +++ b/system/service.py @@ -75,7 +75,7 @@ options: must_exist: required: false default: true - version_added: "1.9" + version_added: "2.0" description: - Avoid a module failure if the named service does not exist. Useful for opportunistically starting/stopping/restarting a list of From 74b7ce9dcf93b1f37597ded6e6990d1e993a3b68 Mon Sep 17 00:00:00 2001 From: Robin Miller Date: Tue, 5 May 2015 17:54:02 -0500 Subject: [PATCH 112/464] Only revoke actually granted permissions, not 'ALL'. This prevents errors when the login_user does not have 'ALL' permissions, and the 'priv' value contains fewer permissions than are held by an existing user. This is particularly an issue when using an Amazon Web Services RDS instance, as there is no (accessible) user with 'ALL' permissions on *.*. 
--- database/mysql/mysql_user.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/database/mysql/mysql_user.py b/database/mysql/mysql_user.py index ba5b6370f1b..824f2b47d3f 100644 --- a/database/mysql/mysql_user.py +++ b/database/mysql/mysql_user.py @@ -245,7 +245,7 @@ def user_mod(cursor, user, host, password, new_priv, append_privs): grant_option = True if db_table not in new_priv: if user != "root" and "PROXY" not in priv and not append_privs: - privileges_revoke(cursor, user,host,db_table,grant_option) + privileges_revoke(cursor, user,host,db_table,priv,grant_option) changed = True # If the user doesn't currently have any privileges on a db.table, then @@ -262,7 +262,7 @@ def user_mod(cursor, user, host, password, new_priv, append_privs): priv_diff = set(new_priv[db_table]) ^ set(curr_priv[db_table]) if (len(priv_diff) > 0): if not append_privs: - privileges_revoke(cursor, user,host,db_table,grant_option) + privileges_revoke(cursor, user,host,db_table,curr_priv[db_table],grant_option) privileges_grant(cursor, user,host,db_table,new_priv[db_table]) changed = True @@ -342,7 +342,7 @@ def privileges_unpack(priv): return output -def privileges_revoke(cursor, user,host,db_table,grant_option): +def privileges_revoke(cursor, user,host,db_table,priv,grant_option): # Escape '%' since mysql db.execute() uses a format string db_table = db_table.replace('%', '%%') if grant_option: @@ -350,7 +350,8 @@ def privileges_revoke(cursor, user,host,db_table,grant_option): query.append("FROM %s@%s") query = ' '.join(query) cursor.execute(query, (user, host)) - query = ["REVOKE ALL PRIVILEGES ON %s" % mysql_quote_identifier(db_table, 'table')] + priv_string = ",".join(filter(lambda x: x not in [ 'GRANT', 'REQUIRESSL' ], priv)) + query = ["REVOKE %s ON %s" % (priv_string, mysql_quote_identifier(db_table, 'table'))] query.append("FROM %s@%s") query = ' '.join(query) cursor.execute(query, (user, host)) From cda7a9be1592c82fd9c824185507d4ad3cbb5a5b Mon Sep 17 
00:00:00 2001 From: Robin Miller Date: Tue, 26 May 2015 12:36:46 -0500 Subject: [PATCH 113/464] Replaced lambda functions with list comprehensions. --- database/mysql/mysql_user.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/database/mysql/mysql_user.py b/database/mysql/mysql_user.py index 824f2b47d3f..afebd0a00c3 100644 --- a/database/mysql/mysql_user.py +++ b/database/mysql/mysql_user.py @@ -350,7 +350,7 @@ def privileges_revoke(cursor, user,host,db_table,priv,grant_option): query.append("FROM %s@%s") query = ' '.join(query) cursor.execute(query, (user, host)) - priv_string = ",".join(filter(lambda x: x not in [ 'GRANT', 'REQUIRESSL' ], priv)) + priv_string = ",".join([p for p in priv if p not in ('GRANT', 'REQUIRESSL')]) query = ["REVOKE %s ON %s" % (priv_string, mysql_quote_identifier(db_table, 'table'))] query.append("FROM %s@%s") query = ' '.join(query) @@ -360,7 +360,7 @@ def privileges_grant(cursor, user,host,db_table,priv): # Escape '%' since mysql db.execute uses a format string and the # specification of db and table often use a % (SQL wildcard) db_table = db_table.replace('%', '%%') - priv_string = ",".join(filter(lambda x: x not in [ 'GRANT', 'REQUIRESSL' ], priv)) + priv_string = ",".join([p for p in priv if p not in ('GRANT', 'REQUIRESSL')]) query = ["GRANT %s ON %s" % (priv_string, mysql_quote_identifier(db_table, 'table'))] query.append("TO %s@%s") if 'GRANT' in priv: From 898f47e55702534fbfd569e28517afedbbf00c5a Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 26 May 2015 15:22:39 -0400 Subject: [PATCH 114/464] stat doc fix fixes #1371 --- files/stat.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/files/stat.py b/files/stat.py index ee3998f5f75..798a560369e 100644 --- a/files/stat.py +++ b/files/stat.py @@ -233,13 +233,13 @@ stat: md5: description: md5 hash of the path returned: success, path exists and user can read stats and path supports hashing and md5 is supported - type: boolean - 
sample: True + type: string + sample: f88fa92d8cf2eeecf4c0a50ccc96d0c0 checksum: description: hash of the path returned: success, path exists and user can read stats and path supports hashing - type: boolean - sample: True + type: string + sample: 50ba294cdf28c0d5bcde25708df53346825a429f pw_name: description: User name of owner returned: success, path exists and user can read stats and installed python supports it From db9ab9b2629f00350a743a4eca72fb5ee8dc8c77 Mon Sep 17 00:00:00 2001 From: Jonathan Mainguy Date: Mon, 18 May 2015 21:53:20 -0400 Subject: [PATCH 115/464] escapeds changes fixed merge conflict remove uneeded regexs arrays --- database/mysql/mysql_user.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/database/mysql/mysql_user.py b/database/mysql/mysql_user.py index afebd0a00c3..a3e24261c77 100644 --- a/database/mysql/mysql_user.py +++ b/database/mysql/mysql_user.py @@ -320,12 +320,8 @@ def privileges_unpack(priv): output = {} for item in priv.strip().split('/'): pieces = item.strip().split(':') - if '.' 
in pieces[0]: - pieces[0] = pieces[0].split('.') - for idx, piece in enumerate(pieces): - if pieces[0][idx] != "*": - pieces[0][idx] = "`" + pieces[0][idx] + "`" - pieces[0] = '.'.join(pieces[0]) + dbpriv = pieces[0].rsplit(".", 1) + pieces[0] = "`%s`.%s" % (dbpriv[0].strip('`'), dbpriv[1]) output[pieces[0]] = pieces[1].upper().split(',') new_privs = frozenset(output[pieces[0]]) From 41049042de1fc302f05d668b58617a6d2b26a2ea Mon Sep 17 00:00:00 2001 From: Jonathan Mainguy Date: Fri, 22 May 2015 18:57:06 -0400 Subject: [PATCH 116/464] remove blank lines from htpasswd file used standard mktemp() --- web_infrastructure/htpasswd.py | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/web_infrastructure/htpasswd.py b/web_infrastructure/htpasswd.py index 4a72ea37fec..03cd6a5b253 100644 --- a/web_infrastructure/htpasswd.py +++ b/web_infrastructure/htpasswd.py @@ -198,6 +198,30 @@ def main(): if not passlib_installed: module.fail_json(msg="This module requires the passlib Python library") + # Check file for blank lines in effort to avoid "need more than 1 value to unpack" error. + f = open(path, "r") + try: + lines=f.readlines() + finally: + f.close + + # If the file gets edited, it returns true, so only edit the file if it has blank lines + strip = False + for line in lines: + if not line.strip(): + strip = True + + if strip: + # If check mode, create a temporary file + if check_mode: + temp = tempfile.NamedTemporaryFile() + path = temp.name + f = open(path,"w") + try: + [f.write(line) for line in lines if line.strip() ] + finally: + f.close + try: if state == 'present': (msg, changed) = present(path, username, password, crypt_scheme, create, check_mode) From fb41bdfc1e782463f46224e714ea8a74f3fe6e7b Mon Sep 17 00:00:00 2001 From: Patrik Lundin Date: Wed, 27 May 2015 08:05:30 +0200 Subject: [PATCH 117/464] service: Fix comments in OpenBsdService class. No functional change. 
--- system/service.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/system/service.py b/system/service.py index 856361ac956..3299b614d52 100644 --- a/system/service.py +++ b/system/service.py @@ -1059,7 +1059,7 @@ class OpenBsdService(Service): getdef_string = stdout.rstrip() - # Depending on the service the string returned from 'default' may be + # Depending on the service the string returned from 'getdef' may be # either a set of flags or the boolean YES/NO if getdef_string == "YES" or getdef_string == "NO": default_flags = '' @@ -1073,7 +1073,7 @@ class OpenBsdService(Service): get_string = stdout.rstrip() - # Depending on the service the string returned from 'getdef/get' may be + # Depending on the service the string returned from 'get' may be # either a set of flags or the boolean YES/NO if get_string == "YES" or get_string == "NO": current_flags = '' From e2773f75541f1a504329eb17286621574df4e6f5 Mon Sep 17 00:00:00 2001 From: Jonathan Davila Date: Mon, 9 Mar 2015 18:49:39 -0400 Subject: [PATCH 118/464] author fix --- cloud/amazon/iam_policy.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/cloud/amazon/iam_policy.py b/cloud/amazon/iam_policy.py index 277877a7254..284c765c104 100644 --- a/cloud/amazon/iam_policy.py +++ b/cloud/amazon/iam_policy.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/python # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify @@ -19,7 +19,7 @@ module: iam_policy short_description: Manage IAM policies for users, groups, and roles description: - Allows uploading or removing IAM policies for IAM users, groups or roles. 
-version_added: "1.9" +version_added: "2.0" options: iam_type: description: @@ -69,11 +69,11 @@ options: default: null aliases: [ 'ec2_access_key', 'access_key' ] - requirements: [ "boto" ] notes: - 'Currently boto does not support the removal of Managed Policies, the module will not work removing/adding managed policies.' -author: Jonathan I. Davila and Paul Seiffert +author: Jonathan I. Davila +extends_documentation_fragment: aws ''' EXAMPLES = ''' @@ -322,4 +322,4 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.ec2 import * -main() \ No newline at end of file +main() From 5f9592248a7e8dbd4f61f3cf4fc0025a5f0a1d8f Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 27 May 2015 07:03:29 -0700 Subject: [PATCH 119/464] Minor fixups found during review of #582 --- cloud/amazon/s3.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/cloud/amazon/s3.py b/cloud/amazon/s3.py index 300461dc8c4..310bda3bfbc 100644 --- a/cloud/amazon/s3.py +++ b/cloud/amazon/s3.py @@ -235,7 +235,7 @@ def download_s3file(module, s3, bucket, obj, dest, retries): # more to get that count of loops. bucket = s3.lookup(bucket) key = bucket.lookup(obj) - for x in xrange(0, retries + 1): + for x in range(0, retries + 1): try: key.get_contents_to_filename(dest) module.exit_json(msg="GET operation complete", changed=True) @@ -243,7 +243,7 @@ def download_s3file(module, s3, bucket, obj, dest, retries): module.fail_json(msg= str(e)) except SSLError as e: # actually fail on last pass through the loop. - if x == retries: + if x >= retries: module.fail_json(msg="s3 download failed; %s" % e) # otherwise, try again, this may be a transient timeout. 
pass @@ -295,7 +295,7 @@ def main(): s3_url = dict(aliases=['S3_URL']), overwrite = dict(aliases=['force'], default='always'), metadata = dict(type='dict'), - retries = dict(aliases=['retry'], type='str', default=0), + retries = dict(aliases=['retry'], type='int', default=0), ), ) module = AnsibleModule(argument_spec=argument_spec) @@ -313,7 +313,7 @@ def main(): s3_url = module.params.get('s3_url') overwrite = module.params.get('overwrite') metadata = module.params.get('metadata') - retries = int(module.params.get('retries')) + retries = module.params.get('retries') if overwrite not in ['always', 'never', 'different']: if module.boolean(overwrite): From 40eef6c3ecd940817acd900e5451afd7883098e4 Mon Sep 17 00:00:00 2001 From: billwanjohi Date: Tue, 24 Feb 2015 18:24:23 +0000 Subject: [PATCH 120/464] s3 put: add support for server-side encryption - added 'encrypt' boolean option - reordered module options alphanumerically --- cloud/amazon/s3.py | 104 ++++++++++++++++++++++++++------------------- 1 file changed, 60 insertions(+), 44 deletions(-) diff --git a/cloud/amazon/s3.py b/cloud/amazon/s3.py index 310bda3bfbc..50ef9d03f54 100644 --- a/cloud/amazon/s3.py +++ b/cloud/amazon/s3.py @@ -22,61 +22,63 @@ description: - This module allows the user to manage S3 buckets and the objects within them. Includes support for creating and deleting both objects and buckets, retrieving objects as files or strings and generating download links. This module has a dependency on python-boto. version_added: "1.1" options: - bucket: + aws_access_key: description: - - Bucket name. - required: true - default: null - aliases: [] - object: - description: - - Keyname of the object inside the bucket. Can be used to create "virtual directories", see examples. + - AWS access key id. If not set then the value of the AWS_ACCESS_KEY environment variable is used. 
required: false default: null - aliases: [] - version_added: "1.3" - src: + aliases: [ 'ec2_access_key', 'access_key' ] + aws_secret_key: description: - - The source file path when performing a PUT operation. + - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used. required: false default: null + aliases: ['ec2_secret_key', 'secret_key'] + bucket: + description: Bucket name. + required: true + default: null aliases: [] - version_added: "1.3" dest: description: - The destination file path when downloading an object/key with a GET operation. required: false aliases: [] version_added: "1.3" - overwrite: + encrypt: description: - - Force overwrite either locally on the filesystem or remotely with the object/key. Used with PUT and GET operations. Starting with (v2.0) the valid values for this parameter are (always, never, different) and boolean is still accepted for backward compatibility, If the value set to (different) the file would be uploaded/downloaded only if the checksums are different. + - When set for PUT mode, asks for server-side encryption required: false - default: always - version_added: "1.2" - mode: - description: - - Switches the module behaviour between put (upload), get (download), geturl (return download url (Ansible 1.3+), getstr (download object as string (1.3+)), create (bucket) and delete (bucket). - required: true - default: null - aliases: [] + default: no expiration: description: - Time limit (in seconds) for the URL generated and returned by S3/Walrus when performing a mode=put or mode=geturl operation. required: false default: 600 aliases: [] - s3_url: - description: - - "S3 URL endpoint for usage with Eucalypus, fakes3, etc. Otherwise assumes AWS" - default: null - aliases: [ S3_URL ] metadata: description: - Metadata for PUT operation, as a dictionary of 'key=value' and 'key=value,key=value'. 
required: false default: null version_added: "1.6" + mode: + description: + - Switches the module behaviour between put (upload), get (download), geturl (return download url (Ansible 1.3+), getstr (download object as string (1.3+)), create (bucket) and delete (bucket). + required: true + default: null + aliases: [] + object: + description: + - Keyname of the object inside the bucket. Can be used to create "virtual directories", see examples. + required: false + default: null + overwrite: + description: + - Force overwrite either locally on the filesystem or remotely with the object/key. Used with PUT and GET operations. + required: false + default: true + version_added: "1.2" region: description: - "AWS region to create the bucket in. If not set then the value of the AWS_REGION and EC2_REGION environment variables are checked, followed by the aws_region and ec2_region settings in the Boto config file. If none of those are set the region defaults to the S3 Location: US Standard. Prior to ansible 1.8 this parameter could be specified but had no effect." @@ -89,6 +91,16 @@ options: required: false default: 0 version_added: "2.0" + s3_url: + description: S3 URL endpoint for usage with Eucalypus, fakes3, etc. Otherwise assumes AWS + default: null + aliases: [ S3_URL ] + src: + description: The source file path when performing a PUT operation. 
+ required: false + default: null + aliases: [] + version_added: "1.3" requirements: [ "boto" ] author: Lester Wade, Ralph Tice @@ -216,7 +228,8 @@ def path_check(path): else: return False -def upload_s3file(module, s3, bucket, obj, src, expiry, metadata): + +def upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt): try: bucket = s3.lookup(bucket) key = bucket.new_key(obj) @@ -224,7 +237,7 @@ def upload_s3file(module, s3, bucket, obj, src, expiry, metadata): for meta_key in metadata.keys(): key.set_metadata(meta_key, metadata[meta_key]) - key.set_contents_from_filename(src) + key.set_contents_from_filename(src, encrypt_key=encrypt) url = key.generate_url(expiry) module.exit_json(msg="PUT operation complete", url=url, changed=True) except s3.provider.storage_copy_error, e: @@ -283,19 +296,21 @@ def is_walrus(s3_url): else: return False + def main(): argument_spec = ec2_argument_spec() argument_spec.update(dict( bucket = dict(required=True), - object = dict(), - src = dict(), dest = dict(default=None), - mode = dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr'], required=True), + encrypt = dict(default=True, type='bool'), expiry = dict(default=600, aliases=['expiration']), - s3_url = dict(aliases=['S3_URL']), - overwrite = dict(aliases=['force'], default='always'), metadata = dict(type='dict'), + mode = dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr'], required=True), + object = dict(), + overwrite = dict(aliases=['force'], default='always'), retries = dict(aliases=['retry'], type='int', default=0), + s3_url = dict(aliases=['S3_URL']), + src = dict(), ), ) module = AnsibleModule(argument_spec=argument_spec) @@ -304,16 +319,17 @@ def main(): module.fail_json(msg='boto required for this module') bucket = module.params.get('bucket') - obj = module.params.get('object') - src = module.params.get('src') + encrypt = module.params.get('encrypt') + expiry = int(module.params['expiry']) if module.params.get('dest'): dest = 
os.path.expanduser(module.params.get('dest')) + metadata = module.params.get('metadata') mode = module.params.get('mode') - expiry = int(module.params['expiry']) - s3_url = module.params.get('s3_url') + obj = module.params.get('object') overwrite = module.params.get('overwrite') - metadata = module.params.get('metadata') retries = module.params.get('retries') + s3_url = module.params.get('s3_url') + src = module.params.get('src') if overwrite not in ['always', 'never', 'different']: if module.boolean(overwrite): @@ -437,24 +453,24 @@ def main(): if md5_local == md5_remote: sum_matches = True if overwrite == 'always': - upload_s3file(module, s3, bucket, obj, src, expiry, metadata) + upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt) else: get_download_url(module, s3, bucket, obj, expiry, changed=False) else: sum_matches = False if overwrite in ('always', 'different'): - upload_s3file(module, s3, bucket, obj, src, expiry, metadata) + upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt) else: module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force upload.") # If neither exist (based on bucket existence), we can create both. if bucketrtn is False and pathrtn is True: create_bucket(module, s3, bucket, location) - upload_s3file(module, s3, bucket, obj, src, expiry, metadata) + upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt) # If bucket exists but key doesn't, just upload. if bucketrtn is True and pathrtn is True and keyrtn is False: - upload_s3file(module, s3, bucket, obj, src, expiry, metadata) + upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt) # Support for deleting an object if we have both params. 
if mode == 'delete': From 5f1d88a8299ca11bc9d1cf64f22eaa03fd8e4565 Mon Sep 17 00:00:00 2001 From: Ryan Walls Date: Wed, 27 May 2015 12:33:11 -0600 Subject: [PATCH 121/464] Update docker module to look at log_driver variable when deciding if container configuration has changed. --- cloud/docker/docker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index e4c27797b71..d765ce00c66 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -1129,7 +1129,7 @@ class DockerManager(object): # LOG_DRIVER expected_log_driver = set(self.module.params.get('log_driver') or []) - actual_log_driver = set(container['HostConfig']['LogConfig'] or []) + actual_log_driver = set(container['HostConfig']['LogConfig']['Type'] or []) if actual_log_driver != expected_log_driver: self.reload_reasons.append('log_driver ({0} => {1})'.format(actual_log_driver, expected_log_driver)) differing.append(container) From 2b5e932cfb4df42f46812aee2476fdf5aabab172 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 27 May 2015 20:27:51 -0700 Subject: [PATCH 122/464] Fix for the new import code when password is empty --- database/mysql/mysql_db.py | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/database/mysql/mysql_db.py b/database/mysql/mysql_db.py index b9d862b56ac..a76c4526727 100644 --- a/database/mysql/mysql_db.py +++ b/database/mysql/mysql_db.py @@ -164,14 +164,19 @@ def db_import(module, host, user, password, db_name, target, all_databases, port if not os.path.exists(target): return module.fail_json(msg="target %s does not exist on the host" % target) - cmd = module.get_bin_path('mysql', True) - cmd += " --user=%s --password=%s" % (pipes.quote(user), pipes.quote(password)) + cmd = [module.get_bin_path('mysql', True)] + if user: + cmd.append("--user=%s" % pipes.quote(user)) + if password: + cmd.append("--password=%s" % pipes.quote(password)) if socket is not None: - cmd += " 
--socket=%s" % pipes.quote(socket) + cmd.append("--socket=%s" % pipes.quote(socket)) else: - cmd += " --host=%s --port=%i" % (pipes.quote(host), port) + cmd.append("--host=%s" % pipes.quote(host)) + cmd.append("--port=%i" % port) if not all_databases: - cmd += " -D %s" % pipes.quote(db_name) + cmd.append("-D") + cmd.append(pipes.quote(db_name)) comp_prog_path = None if os.path.splitext(target)[-1] == '.gz': @@ -183,7 +188,7 @@ def db_import(module, host, user, password, db_name, target, all_databases, port if comp_prog_path: p1 = subprocess.Popen([comp_prog_path, '-dc', target], stdout=subprocess.PIPE, stderr=subprocess.PIPE) - p2 = subprocess.Popen(cmd.split(' '), stdin=p1.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + p2 = subprocess.Popen(cmd, stdin=p1.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (stdout2, stderr2) = p2.communicate() p1.stdout.close() p1.wait() @@ -193,6 +198,7 @@ def db_import(module, host, user, password, db_name, target, all_databases, port else: return p2.returncode, stdout2, stderr2 else: + cmd = ' '.join(cmd) cmd += " < %s" % pipes.quote(target) rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True) return rc, stdout, stderr From cc221aa1a2d1fac9d909d28feda246e06353c3d9 Mon Sep 17 00:00:00 2001 From: Alan Scherger Date: Wed, 27 May 2015 23:12:34 -0500 Subject: [PATCH 123/464] fix docs; only delete network if fwname is not provided --- cloud/google/gce_net.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/google/gce_net.py b/cloud/google/gce_net.py index 079891c5e10..fb9a186f66c 100644 --- a/cloud/google/gce_net.py +++ b/cloud/google/gce_net.py @@ -75,7 +75,7 @@ options: aliases: [] state: description: - - desired state of the persistent disk + - desired state of the network or firewall required: false default: "present" choices: ["active", "present", "absent", "deleted"] @@ -264,7 +264,7 @@ def main(): if fw: gce.ex_destroy_firewall(fw) changed = True - if name: + elif name: 
json_output['name'] = name network = None try: From ba35cb81736f390ec8ba8af362aa10c8f299cd77 Mon Sep 17 00:00:00 2001 From: whiter Date: Thu, 28 May 2015 15:27:46 +1000 Subject: [PATCH 124/464] Doc update to highlight use of body_type parameter --- network/basics/uri.py | 45 ++++++++++++++++++++++++++++--------------- 1 file changed, 29 insertions(+), 16 deletions(-) diff --git a/network/basics/uri.py b/network/basics/uri.py index a3f77919c0f..41efe66bc55 100644 --- a/network/basics/uri.py +++ b/network/basics/uri.py @@ -151,27 +151,40 @@ EXAMPLES = ''' # Create a JIRA issue - -- uri: url=https://your.jira.example.com/rest/api/2/issue/ - method=POST user=your_username password=your_pass - body="{{ lookup('file','issue.json') }}" force_basic_auth=yes - status_code=201 HEADER_Content-Type="application/json" +- uri: + url: https://your.jira.example.com/rest/api/2/issue/ + method: POST + user: your_username + password: your_pass + body: "{{ lookup('file','issue.json') }}" + force_basic_auth: yes + status_code: 201 + body_format: json # Login to a form based webpage, then use the returned cookie to # access the app in later tasks - -- uri: url=https://your.form.based.auth.examle.com/index.php - method=POST body="name=your_username&password=your_password&enter=Sign%20in" - status_code=302 HEADER_Content-Type="application/x-www-form-urlencoded" - register: login - -- uri: url=https://your.form.based.auth.example.com/dashboard.php - method=GET return_content=yes HEADER_Cookie="{{login.set_cookie}}" +- uri: + url: https://your.form.based.auth.examle.com/index.php + method: POST + body: "name=your_username&password=your_password&enter=Sign%20in" + status_code: 302 + HEADER_Content-Type: "application/x-www-form-urlencoded" + register: login + +- uri: + url: https://your.form.based.auth.example.com/dashboard.php + method: GET + return_content: yes + HEADER_Cookie: "{{login.set_cookie}}" # Queue build of a project in Jenkins: - -- uri: 
url=http://{{jenkins.host}}/job/{{jenkins.job}}/build?token={{jenkins.token}} - method=GET user={{jenkins.user}} password={{jenkins.password}} force_basic_auth=yes status_code=201 +- uri: + url: "http://{{ jenkins.host }}/job/{{ jenkins.job }}/build?token={{ jenkins.token }}" + method: GET + user: "{{ jenkins.user }}" + password: "{{ jenkins.password }}" + force_basic_auth: yes + status_code: 201 ''' From c95717afe582ae889f781e44e58683b77657d1e9 Mon Sep 17 00:00:00 2001 From: Ryan Walls Date: Thu, 28 May 2015 11:36:20 -0600 Subject: [PATCH 125/464] Set default "log_driver" option to None in docker module. --- cloud/docker/docker.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index d765ce00c66..e22f8ff3edd 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -100,6 +100,7 @@ options: "none" disables any logging for the container. docker logs won't be available with this driver. "syslog" Syslog logging driver for Docker. Writes log messages to syslog. docker logs command is not available for this logging driver. + If not defined explicitly, the Docker daemon's default ("json-file") will apply. Requires docker >= 1.6.0. 
required: false default: json-file @@ -1510,7 +1511,7 @@ def main(): net = dict(default=None), pid = dict(default=None), insecure_registry = dict(default=False, type='bool'), - log_driver = dict(default='json-file', choices=['json-file', 'none', 'syslog']), + log_driver = dict(default=None, choices=['json-file', 'none', 'syslog']), ), required_together = ( ['tls_client_cert', 'tls_client_key'], From c64a3eb03d0f9adb513c7ad4e8e0b222c0052e7e Mon Sep 17 00:00:00 2001 From: Tom Paine Date: Thu, 28 May 2015 19:08:52 +0100 Subject: [PATCH 126/464] Update vsphere_guest.py --- cloud/vmware/vsphere_guest.py | 1 - 1 file changed, 1 deletion(-) diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index 8e1aa686701..8d1b7946688 100644 --- a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -195,7 +195,6 @@ EXAMPLES = ''' hostname: esx001.mydomain.local # Deploy a guest from a template -# No reconfiguration of the destination guest is done at this stage, a reconfigure is needed to adjust RAM/CPU. - vsphere_guest: vcenter_hostname: vcenter.mydomain.local username: myuser From 718c13bdf2447d22a961c9023c8b73f98dd7b5f4 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Thu, 28 May 2015 16:14:20 -0400 Subject: [PATCH 127/464] Add OpenStack Client Config module All of the ansible OpenStack modules are driven by a clouds.yaml config file which is processed by os-client-config. Expose the data returned by that library to enable playbooks to iterate over available clouds. --- cloud/openstack/os_client_config.py | 70 +++++++++++++++++++++++++++++ 1 file changed, 70 insertions(+) create mode 100644 cloud/openstack/os_client_config.py diff --git a/cloud/openstack/os_client_config.py b/cloud/openstack/os_client_config.py new file mode 100644 index 00000000000..9ee7d3ef394 --- /dev/null +++ b/cloud/openstack/os_client_config.py @@ -0,0 +1,70 @@ +#!/usr/bin/python + +# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. 
+# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . + +import os_client_config +from os_client_config import exceptions + +DOCUMENTATION = ''' +--- +module: os_client_config +short_description: Get OpenStack Client config +description: + - Get I(openstack) client config data from clouds.yaml or environment +options: + regions: + description: + - Include regions in the returned data + required: false + default: 'yes' +version_added: "2.0" +requirements: [ os-client-config ] +author: Monty Taylor +''' + +EXAMPLES = ''' +# Inject facts about OpenStack clouds +- os-client-config +''' + + +def main(): + module = AnsibleModule( + argument_spec=dict( + regions = dict(default=True, required=False, type='bool'), + action = dict(default='list', choices=['list']), + ), + ) + p = module.params + + try: + config = os_client_config.OpenStackConfig() + clouds = {} + for cloud in config.get_all_clouds(): + if p['regions']: + cloud_region = clouds.get(cloud.name, {}) + cloud_region[cloud.region] = cloud.config + clouds[cloud.name] = cloud_region + else: + clouds[cloud.name] = cloud.config + module.exit_json(clouds=clouds) + except exceptions.OpenStackConfigException as e: + module.fail_json(msg=str(e)) + +# import module snippets +from ansible.module_utils.basic import * + +main() From 7fea93835c172d23638959cbe2d00a3be8d14557 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 28 May 2015 15:02:28 -0700 Subject: [PATCH 
128/464] Change uri module to validate ssl certs by default --- network/basics/uri.py | 23 ++++++++++++++++++----- 1 file changed, 18 insertions(+), 5 deletions(-) diff --git a/network/basics/uri.py b/network/basics/uri.py index 41efe66bc55..5396a0a55ff 100644 --- a/network/basics/uri.py +++ b/network/basics/uri.py @@ -132,6 +132,15 @@ options: description: - all arguments accepted by the M(file) module also work here required: false + validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only + set to C(no) used on personally controlled sites using self-signed + certificates. Prior to 1.9.2 the code defaulted to C(no). + required: false + default: 'yes' + choices: ['yes', 'no'] + version_added: '1.9.2' # informational: requirements for nodes requirements: [ urlparse, httplib2 ] @@ -163,20 +172,21 @@ EXAMPLES = ''' # Login to a form based webpage, then use the returned cookie to # access the app in later tasks + - uri: url: https://your.form.based.auth.examle.com/index.php method: POST body: "name=your_username&password=your_password&enter=Sign%20in" status_code: 302 HEADER_Content-Type: "application/x-www-form-urlencoded" - register: login + register: login - uri: url: https://your.form.based.auth.example.com/dashboard.php method: GET return_content: yes HEADER_Cookie: "{{login.set_cookie}}" - + # Queue build of a project in Jenkins: - uri: url: "http://{{ jenkins.host }}/job/{{ jenkins.job }}/build?token={{ jenkins.token }}" @@ -257,7 +267,7 @@ def url_filename(url): return fn -def uri(module, url, dest, user, password, body, body_format, method, headers, redirects, socket_timeout): +def uri(module, url, dest, user, password, body, body_format, method, headers, redirects, socket_timeout, validate_certs): # To debug #httplib2.debug = 4 @@ -273,7 +283,8 @@ def uri(module, url, dest, user, password, body, body_format, method, headers, r follow_all_redirects = False # Create a Http object and set some default options. 
- h = httplib2.Http(disable_ssl_certificate_validation=True, timeout=socket_timeout) + disable_validation = not validate_certs + h = httplib2.Http(disable_ssl_certificate_validation=disable_validation, timeout=socket_timeout) h.follow_all_redirects = follow_all_redirects h.follow_redirects = follow_redirects h.forward_authorization_headers = True @@ -360,6 +371,7 @@ def main(): removes = dict(required=False, default=None), status_code = dict(required=False, default=[200], type='list'), timeout = dict(required=False, default=30, type='int'), + validate_certs = dict(required=False, default=False, type='bool'), ), check_invalid_arguments=False, add_file_common_args=True @@ -384,6 +396,7 @@ def main(): removes = module.params['removes'] status_code = [int(x) for x in list(module.params['status_code'])] socket_timeout = module.params['timeout'] + validate_certs = module.params['validate_certs'] dict_headers = {} @@ -425,7 +438,7 @@ def main(): # Make the request - resp, content, dest = uri(module, url, dest, user, password, body, body_format, method, dict_headers, redirects, socket_timeout) + resp, content, dest = uri(module, url, dest, user, password, body, body_format, method, dict_headers, redirects, socket_timeout, validate_certs) resp['status'] = int(resp['status']) # Write the file out if requested From 5983d64d7728ea88ef27606e95e4aa34cde5ff46 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 28 May 2015 15:33:21 -0700 Subject: [PATCH 129/464] Properly flip default for verifying server cert. 
Add nice error messages when the cert is invalid --- network/basics/uri.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/network/basics/uri.py b/network/basics/uri.py index 5396a0a55ff..6138edbf94b 100644 --- a/network/basics/uri.py +++ b/network/basics/uri.py @@ -350,6 +350,10 @@ def uri(module, url, dest, user, password, body, body_format, method, headers, r module.fail_json(msg="The server requested a type of HMACDigest authentication that we are unfamiliar with.") except httplib2.UnimplementedHmacDigestAuthOptionError: module.fail_json(msg="The server requested a type of HMACDigest authentication that we are unfamiliar with.") + except httplib2.CertificateHostnameMismatch: + module.fail_json(msg="The server's certificate does not match with its hostname.") + except httplib2.SSLHandshakeError: + module.fail_json(msg="Unable to validate server's certificate against available CA certs.") except socket.error, e: module.fail_json(msg="Socket error: %s to %s" % (e, url)) @@ -371,7 +375,7 @@ def main(): removes = dict(required=False, default=None), status_code = dict(required=False, default=[200], type='list'), timeout = dict(required=False, default=30, type='int'), - validate_certs = dict(required=False, default=False, type='bool'), + validate_certs = dict(required=False, default=True, type='bool'), ), check_invalid_arguments=False, add_file_common_args=True From 7a5ad0c7f3b41edcc67867d88fd1b6d2c04922aa Mon Sep 17 00:00:00 2001 From: Jonathan Mainguy Date: Fri, 22 May 2015 23:16:56 -0400 Subject: [PATCH 130/464] add :// url support for EL 5 --- packaging/os/yum.py | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/packaging/os/yum.py b/packaging/os/yum.py index ac183641a30..b972f5cc502 100644 --- a/packaging/os/yum.py +++ b/packaging/os/yum.py @@ -27,6 +27,11 @@ import os import yum import rpm import syslog +import platform +import tempfile +import shutil +from ansible.module_utils.urls import * +from 
distutils.version import LooseVersion try: from yum.misc import find_unfinished_transactions, find_ts_remaining @@ -486,6 +491,7 @@ def install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): res['msg'] = '' res['rc'] = 0 res['changed'] = False + tempdir = tempfile.mkdtemp() for spec in items: pkg = None @@ -508,6 +514,21 @@ def install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): # URL elif '://' in spec: pkg = spec + # Check if Enterprise Linux 5 or less, as yum on those versions do not support installing via url + distribution_version = get_distribution_version() + distribution = platform.dist() + if distribution[0] == "redhat" and LooseVersion(distribution_version) < LooseVersion("6"): + package = os.path.join(tempdir, str(pkg.rsplit('/', 1)[1])) + try: + rsp, info = fetch_url(module, pkg) + data = rsp.read() + f = open(package, 'w') + f.write(data) + f.close() + pkg = package + except Exception, e: + shutil.rmtree(tempdir) + module.fail_json(msg="Failure downloading %s, %s" % (spec, e)) #groups :( elif spec.startswith('@'): @@ -569,6 +590,11 @@ def install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): cmd = yum_basecmd + ['install', pkg] if module.check_mode: + # Remove rpms downloaded for EL5 via url + try: + shutil.rmtree(tempdir) + except Exception, e: + module.fail_json(msg="Failure deleting temp directory %s, %s" % (tempdir, e)) module.exit_json(changed=True) changed = True @@ -600,6 +626,12 @@ def install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): # accumulate any changes res['changed'] |= changed + # Remove rpms downloaded for EL5 via url + try: + shutil.rmtree(tempdir) + except Exception, e: + module.fail_json(msg="Failure deleting temp directory %s, %s" % (tempdir, e)) + module.exit_json(**res) From f3277f0f1fd45eca0275c9c38e75210a88f8318e Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 29 May 2015 13:37:47 -0700 Subject: [PATCH 131/464] Import ansible 
module_utils at bottom of file to not mess with line numbers in tracebacks --- packaging/os/yum.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/packaging/os/yum.py b/packaging/os/yum.py index b972f5cc502..00f77d68dfc 100644 --- a/packaging/os/yum.py +++ b/packaging/os/yum.py @@ -30,7 +30,6 @@ import syslog import platform import tempfile import shutil -from ansible.module_utils.urls import * from distutils.version import LooseVersion try: @@ -891,5 +890,7 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() +from ansible.module_utils.urls import * +if __name__ == '__main__': + main() From f8d8af17cdc72500af8319c96004b86ac702a0a4 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 29 May 2015 18:55:32 -0700 Subject: [PATCH 132/464] Use a list comprehension instead of map and lambda --- database/mysql/mysql_user.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/database/mysql/mysql_user.py b/database/mysql/mysql_user.py index 1b1d4901eff..97150775507 100644 --- a/database/mysql/mysql_user.py +++ b/database/mysql/mysql_user.py @@ -323,7 +323,7 @@ def privileges_unpack(priv): dbpriv = pieces[0].rsplit(".", 1) pieces[0] = "`%s`.%s" % (dbpriv[0].strip('`'), dbpriv[1]) - output[pieces[0]] = map(lambda s: s.strip(), pieces[1].upper().split(',')) + output[pieces[0]] = [s.strip() for s in pieces[1].upper().split(',')] new_privs = frozenset(output[pieces[0]]) if not new_privs.issubset(VALID_PRIVS): raise InvalidPrivsError('Invalid privileges specified: %s' % new_privs.difference(VALID_PRIVS)) From 88eddb13c01ff6f18c86a7b391ca2478a7fa05c7 Mon Sep 17 00:00:00 2001 From: Ryan Walls Date: Mon, 1 Jun 2015 09:48:24 -0600 Subject: [PATCH 133/464] Update docker module to avoid false positives when containers are first created. Also have the module check for api compatibility before trying to set a "--log-driver" option. 
--- cloud/docker/docker.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index e22f8ff3edd..977969da03f 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -1129,12 +1129,13 @@ class DockerManager(object): # LOG_DRIVER - expected_log_driver = set(self.module.params.get('log_driver') or []) - actual_log_driver = set(container['HostConfig']['LogConfig']['Type'] or []) - if actual_log_driver != expected_log_driver: - self.reload_reasons.append('log_driver ({0} => {1})'.format(actual_log_driver, expected_log_driver)) - differing.append(container) - continue + if self.ensure_capability('log_driver', False) : + expected_log_driver = self.module.params.get('log_driver') or 'json-file' + actual_log_driver = container['HostConfig']['LogConfig']['Type'] + if actual_log_driver != expected_log_driver: + self.reload_reasons.append('log_driver ({0} => {1})'.format(actual_log_driver, expected_log_driver)) + differing.append(container) + continue return differing From ce556a053edab5d9a51d73a9054b9396e48e87e2 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Mon, 1 Jun 2015 11:01:17 -0500 Subject: [PATCH 134/464] Return a list of OpenStack clouds The main use of this is to feed jinja templating, so structure the data returned slightly better for that purpose. 
--- cloud/openstack/os_client_config.py | 31 +++++++++-------------------- 1 file changed, 9 insertions(+), 22 deletions(-) diff --git a/cloud/openstack/os_client_config.py b/cloud/openstack/os_client_config.py index 9ee7d3ef394..09a9e713210 100644 --- a/cloud/openstack/os_client_config.py +++ b/cloud/openstack/os_client_config.py @@ -24,43 +24,30 @@ module: os_client_config short_description: Get OpenStack Client config description: - Get I(openstack) client config data from clouds.yaml or environment -options: - regions: - description: - - Include regions in the returned data - required: false - default: 'yes' version_added: "2.0" requirements: [ os-client-config ] author: Monty Taylor ''' EXAMPLES = ''' -# Inject facts about OpenStack clouds -- os-client-config +# Get list of clouds that do not support security groups +- os-client-config: +- debug: var={{ item }} + with_items: "{{ openstack.clouds|rejectattr('secgroup_source', 'none')|list() }}" ''' def main(): - module = AnsibleModule( - argument_spec=dict( - regions = dict(default=True, required=False, type='bool'), - action = dict(default='list', choices=['list']), - ), - ) + module = AnsibleModule() p = module.params try: config = os_client_config.OpenStackConfig() - clouds = {} + clouds = [] for cloud in config.get_all_clouds(): - if p['regions']: - cloud_region = clouds.get(cloud.name, {}) - cloud_region[cloud.region] = cloud.config - clouds[cloud.name] = cloud_region - else: - clouds[cloud.name] = cloud.config - module.exit_json(clouds=clouds) + cloud.config['name'] = cloud.name + clouds.append(cloud.config) + module.exit_json(ansible_facts=dict(openstack=dict(clouds=clouds))) except exceptions.OpenStackConfigException as e: module.fail_json(msg=str(e)) From 6a63f2a6ba1a3ddd22fcb4d057259740c8df1f6d Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 19 Nov 2014 15:54:47 -0600 Subject: [PATCH 135/464] Command module changes for v2 compatibility --- commands/command.py | 60 
++++++++++----------------------------------- 1 file changed, 13 insertions(+), 47 deletions(-) diff --git a/commands/command.py b/commands/command.py index 131fc4c7ffc..50d329199c5 100644 --- a/commands/command.py +++ b/commands/command.py @@ -154,12 +154,22 @@ def main(): # the command module is the one ansible module that does not take key=value args # hence don't copy this one if you are looking to build others! - module = CommandModule(argument_spec=dict()) + module = AnsibleModule( + argument_spec=dict( + _raw_params = dict(), + _uses_shell = dict(type='bool', default=False), + chdir = dict(), + executable = dict(), + creates = dict(), + removes = dict(), + warn = dict(type='bool', default=True), + ) + ) - shell = module.params['shell'] + shell = module.params['_uses_shell'] chdir = module.params['chdir'] executable = module.params['executable'] - args = module.params['args'] + args = module.params['_raw_params'] creates = module.params['creates'] removes = module.params['removes'] warn = module.params['warn'] @@ -232,48 +242,4 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.splitter import * -# only the command module should ever need to do this -# everything else should be simple key=value - -class CommandModule(AnsibleModule): - - def _handle_aliases(self): - return {} - - def _check_invalid_arguments(self): - pass - - def _load_params(self): - ''' read the input and return a dictionary and the arguments string ''' - args = MODULE_ARGS - params = copy.copy(OPTIONS) - params['shell'] = False - if "#USE_SHELL" in args: - args = args.replace("#USE_SHELL", "") - params['shell'] = True - - items = split_args(args) - - for x in items: - quoted = x.startswith('"') and x.endswith('"') or x.startswith("'") and x.endswith("'") - if '=' in x and not quoted: - # check to see if this is a special parameter for the command - k, v = x.split('=', 1) - v = unquote(v.strip()) - if k in OPTIONS.keys(): - if k == "chdir": - v = 
os.path.abspath(os.path.expanduser(v)) - if not (os.path.exists(v) and os.path.isdir(v)): - self.fail_json(rc=258, msg="cannot change to directory '%s': path does not exist" % v) - elif k == "executable": - v = os.path.abspath(os.path.expanduser(v)) - if not (os.path.exists(v)): - self.fail_json(rc=258, msg="cannot use executable '%s': file does not exist" % v) - params[k] = v - # Remove any of the above k=v params from the args string - args = PARAM_REGEX.sub('', args) - params['args'] = args.strip() - - return (params, params['args']) - main() From 76c6a5327400ff10fa3cf002875ba6a87a0b40ea Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 14 Jan 2015 08:02:49 -0600 Subject: [PATCH 136/464] Fix missing expanduser on chdir value --- commands/command.py | 1 + 1 file changed, 1 insertion(+) diff --git a/commands/command.py b/commands/command.py index 50d329199c5..6baf35922c2 100644 --- a/commands/command.py +++ b/commands/command.py @@ -178,6 +178,7 @@ def main(): module.fail_json(rc=256, msg="no command given") if chdir: + chdir = os.path.abspath(os.path.expanduser(chdir)) os.chdir(chdir) if creates: From 30fc6f03d67e39caacd0b3e2753feb5fc55eab38 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 30 Apr 2015 12:49:32 -0700 Subject: [PATCH 137/464] Allow playbook specified login_user and login_password to override config file settings --- database/mysql/mysql_user.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/database/mysql/mysql_user.py b/database/mysql/mysql_user.py index 97150775507..286983b38d7 100644 --- a/database/mysql/mysql_user.py +++ b/database/mysql/mysql_user.py @@ -183,7 +183,7 @@ class InvalidPrivsError(Exception): # MySQL module specific support methods. 
# -def connect(module, login_user=None, login_password=None, config_file=''): +def connect(module, login_user=None, login_password=None, config_file='~/.my.cnf'): config = { 'host': module.params['login_host'], 'db': 'mysql' From 2aa793ec0ecd3d9e50ead35ee34e2c343c50800c Mon Sep 17 00:00:00 2001 From: xiaclo Date: Thu, 30 Apr 2015 13:01:00 +1000 Subject: [PATCH 138/464] Fix issue #1156 Fix as suggested in the issue. https://github.com/ansible/ansible-modules-core/issues/1156 --- cloud/amazon/route53.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/route53.py b/cloud/amazon/route53.py index d6c758b3974..c9463f20729 100644 --- a/cloud/amazon/route53.py +++ b/cloud/amazon/route53.py @@ -209,7 +209,7 @@ def main(): command_in = module.params.get('command') zone_in = module.params.get('zone').lower() - ttl_in = module.params.get('ttl') + ttl_in = int(module.params.get('ttl')) record_in = module.params.get('record').lower() type_in = module.params.get('type') value_in = module.params.get('value') From 1fb035a346019cc506b606641cb4ee351c815120 Mon Sep 17 00:00:00 2001 From: xiaclo Date: Fri, 1 May 2015 12:28:47 +1000 Subject: [PATCH 139/464] Update route53.py --- cloud/amazon/route53.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/route53.py b/cloud/amazon/route53.py index c9463f20729..d6c758b3974 100644 --- a/cloud/amazon/route53.py +++ b/cloud/amazon/route53.py @@ -209,7 +209,7 @@ def main(): command_in = module.params.get('command') zone_in = module.params.get('zone').lower() - ttl_in = int(module.params.get('ttl')) + ttl_in = module.params.get('ttl') record_in = module.params.get('record').lower() type_in = module.params.get('type') value_in = module.params.get('value') From ef1c7eef5bf07327f258151b96056c4599c0d89e Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Tue, 31 Mar 2015 17:05:43 -0400 Subject: [PATCH 140/464] Add OpenStack Subnet module Also deprecated old quantum_subnet module --- 
cloud/openstack/os_subnet | 213 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 213 insertions(+) create mode 100644 cloud/openstack/os_subnet diff --git a/cloud/openstack/os_subnet b/cloud/openstack/os_subnet new file mode 100644 index 00000000000..ad3a27c2816 --- /dev/null +++ b/cloud/openstack/os_subnet @@ -0,0 +1,213 @@ +#!/usr/bin/python +#coding: utf-8 -*- + +# (c) 2013, Benno Joy +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . 
+ +try: + import shade + HAS_SHADE = True +except ImportError: + HAS_SHADE = False + + +DOCUMENTATION = ''' +--- +module: os_subnet +short_description: Add/Remove subnet to an OpenStack network +extends_documentation_fragment: openstack +version_added: "1.10" +description: + - Add or Remove a subnet to an OpenStack network +options: + state: + description: + - Indicate desired state of the resource + choices: ['present', 'absent'] + default: present + network_name: + description: + - Name of the network to which the subnet should be attached + required: true + default: None + name: + description: + - The name of the subnet that should be created + required: true + default: None + cidr: + description: + - The CIDR representation of the subnet that should be assigned to the subnet + required: true + default: None + ip_version: + description: + - The IP version of the subnet 4 or 6 + required: false + default: 4 + enable_dhcp: + description: + - Whether DHCP should be enabled for this subnet. 
+ required: false + default: true + gateway_ip: + description: + - The ip that would be assigned to the gateway for this subnet + required: false + default: None + dns_nameservers: + description: + - DNS nameservers for this subnet, comma-separated + required: false + default: None + version_added: "1.4" + allocation_pool_start: + description: + - From the subnet pool the starting address from which the IP should be allocated + required: false + default: None + allocation_pool_end: + description: + - From the subnet pool the last IP that should be assigned to the virtual machines + required: false + default: None +requirements: ["shade"] +''' + +EXAMPLES = ''' +# Create a subnet with the specified network +- os_subnet: state=present username=admin password=admin + project_name=admin + network_name=network1 name=net1subnet cidr=192.168.0.0/24" +''' + +_os_network_id = None + +def _get_net_id(neutron, module): + kwargs = { + 'name': module.params['network_name'], + } + try: + networks = neutron.list_networks(**kwargs) + except Exception, e: + module.fail_json("Error in listing neutron networks: %s" % e.message) + if not networks['networks']: + return None + return networks['networks'][0]['id'] + + +def _get_subnet_id(module, neutron): + global _os_network_id + subnet_id = None + _os_network_id = _get_net_id(neutron, module) + if not _os_network_id: + module.fail_json(msg = "network id of network not found.") + else: + kwargs = { + 'name': module.params['name'], + } + try: + subnets = neutron.list_subnets(**kwargs) + except Exception, e: + module.fail_json( msg = " Error in getting the subnet list:%s " % e.message) + if not subnets['subnets']: + return None + return subnets['subnets'][0]['id'] + +def _create_subnet(module, neutron): + neutron.format = 'json' + subnet = { + 'name': module.params['name'], + 'ip_version': module.params['ip_version'], + 'enable_dhcp': module.params['enable_dhcp'], + 'gateway_ip': module.params['gateway_ip'], + 'dns_nameservers': 
module.params['dns_nameservers'], + 'network_id': _os_network_id, + 'cidr': module.params['cidr'], + } + if module.params['allocation_pool_start'] and module.params['allocation_pool_end']: + allocation_pools = [ + { + 'start' : module.params['allocation_pool_start'], + 'end' : module.params['allocation_pool_end'] + } + ] + subnet.update({'allocation_pools': allocation_pools}) + if not module.params['gateway_ip']: + subnet.pop('gateway_ip') + if module.params['dns_nameservers']: + subnet['dns_nameservers'] = module.params['dns_nameservers'].split(',') + else: + subnet.pop('dns_nameservers') + try: + new_subnet = neutron.create_subnet(dict(subnet=subnet)) + except Exception, e: + module.fail_json(msg = "Failure in creating subnet: %s" % e.message) + return new_subnet['subnet']['id'] + + +def _delete_subnet(module, neutron, subnet_id): + try: + neutron.delete_subnet(subnet_id) + except Exception, e: + module.fail_json( msg = "Error in deleting subnet: %s" % e.message) + return True + + +def main(): + + argument_spec = openstack_full_argument_spec( + name = dict(required=True), + network_name = dict(required=True), + cidr = dict(required=True), + ip_version = dict(default='4', choices=['4', '6']), + enable_dhcp = dict(default='true', type='bool'), + gateway_ip = dict(default=None), + dns_nameservers = dict(default=None), + allocation_pool_start = dict(default=None), + allocation_pool_end = dict(default=None), + ) + module_kwargs = openstack_module_kwargs() + module = AnsibleModule(argument_spec, **module_kwargs) + + if not HAS_SHADE: + module.fail_json(msg='shade is required for this module') + + try: + cloud = shade.openstack_cloud(**module.params) + neutron = cloud.neutron_client + if module.params['state'] == 'present': + subnet_id = _get_subnet_id(module, neutron) + if not subnet_id: + subnet_id = _create_subnet(module, neutron) + module.exit_json(changed = True, result = "Created" , id = subnet_id) + else: + module.exit_json(changed = False, result = "success" , 
id = subnet_id) + else: + subnet_id = _get_subnet_id(module, neutron) + if not subnet_id: + module.exit_json(changed = False, result = "success") + else: + _delete_subnet(module, neutron, subnet_id) + module.exit_json(changed = True, result = "deleted") + except shade.OpenStackCloudException as e: + module.fail_json(msg=e.message) + + +# this is magic, see lib/ansible/module_common.py +from ansible.module_utils.basic import * +from ansible.module_utils.openstack import * +main() + From abbf8a40ca14427240ef3db8094b567bb6cef1be Mon Sep 17 00:00:00 2001 From: David Shrewsbury Date: Thu, 16 Apr 2015 09:17:57 -0400 Subject: [PATCH 141/464] Rename os_subnet to os_subnet.py --- cloud/openstack/os_subnet | 213 ---------------------------- cloud/openstack/os_subnet.py | 264 +++++++++++++++-------------------- 2 files changed, 110 insertions(+), 367 deletions(-) delete mode 100644 cloud/openstack/os_subnet diff --git a/cloud/openstack/os_subnet b/cloud/openstack/os_subnet deleted file mode 100644 index ad3a27c2816..00000000000 --- a/cloud/openstack/os_subnet +++ /dev/null @@ -1,213 +0,0 @@ -#!/usr/bin/python -#coding: utf-8 -*- - -# (c) 2013, Benno Joy -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see . 
- -try: - import shade - HAS_SHADE = True -except ImportError: - HAS_SHADE = False - - -DOCUMENTATION = ''' ---- -module: os_subnet -short_description: Add/Remove subnet to an OpenStack network -extends_documentation_fragment: openstack -version_added: "1.10" -description: - - Add or Remove a subnet to an OpenStack network -options: - state: - description: - - Indicate desired state of the resource - choices: ['present', 'absent'] - default: present - network_name: - description: - - Name of the network to which the subnet should be attached - required: true - default: None - name: - description: - - The name of the subnet that should be created - required: true - default: None - cidr: - description: - - The CIDR representation of the subnet that should be assigned to the subnet - required: true - default: None - ip_version: - description: - - The IP version of the subnet 4 or 6 - required: false - default: 4 - enable_dhcp: - description: - - Whether DHCP should be enabled for this subnet. 
- required: false - default: true - gateway_ip: - description: - - The ip that would be assigned to the gateway for this subnet - required: false - default: None - dns_nameservers: - description: - - DNS nameservers for this subnet, comma-separated - required: false - default: None - version_added: "1.4" - allocation_pool_start: - description: - - From the subnet pool the starting address from which the IP should be allocated - required: false - default: None - allocation_pool_end: - description: - - From the subnet pool the last IP that should be assigned to the virtual machines - required: false - default: None -requirements: ["shade"] -''' - -EXAMPLES = ''' -# Create a subnet with the specified network -- os_subnet: state=present username=admin password=admin - project_name=admin - network_name=network1 name=net1subnet cidr=192.168.0.0/24" -''' - -_os_network_id = None - -def _get_net_id(neutron, module): - kwargs = { - 'name': module.params['network_name'], - } - try: - networks = neutron.list_networks(**kwargs) - except Exception, e: - module.fail_json("Error in listing neutron networks: %s" % e.message) - if not networks['networks']: - return None - return networks['networks'][0]['id'] - - -def _get_subnet_id(module, neutron): - global _os_network_id - subnet_id = None - _os_network_id = _get_net_id(neutron, module) - if not _os_network_id: - module.fail_json(msg = "network id of network not found.") - else: - kwargs = { - 'name': module.params['name'], - } - try: - subnets = neutron.list_subnets(**kwargs) - except Exception, e: - module.fail_json( msg = " Error in getting the subnet list:%s " % e.message) - if not subnets['subnets']: - return None - return subnets['subnets'][0]['id'] - -def _create_subnet(module, neutron): - neutron.format = 'json' - subnet = { - 'name': module.params['name'], - 'ip_version': module.params['ip_version'], - 'enable_dhcp': module.params['enable_dhcp'], - 'gateway_ip': module.params['gateway_ip'], - 'dns_nameservers': 
module.params['dns_nameservers'], - 'network_id': _os_network_id, - 'cidr': module.params['cidr'], - } - if module.params['allocation_pool_start'] and module.params['allocation_pool_end']: - allocation_pools = [ - { - 'start' : module.params['allocation_pool_start'], - 'end' : module.params['allocation_pool_end'] - } - ] - subnet.update({'allocation_pools': allocation_pools}) - if not module.params['gateway_ip']: - subnet.pop('gateway_ip') - if module.params['dns_nameservers']: - subnet['dns_nameservers'] = module.params['dns_nameservers'].split(',') - else: - subnet.pop('dns_nameservers') - try: - new_subnet = neutron.create_subnet(dict(subnet=subnet)) - except Exception, e: - module.fail_json(msg = "Failure in creating subnet: %s" % e.message) - return new_subnet['subnet']['id'] - - -def _delete_subnet(module, neutron, subnet_id): - try: - neutron.delete_subnet(subnet_id) - except Exception, e: - module.fail_json( msg = "Error in deleting subnet: %s" % e.message) - return True - - -def main(): - - argument_spec = openstack_full_argument_spec( - name = dict(required=True), - network_name = dict(required=True), - cidr = dict(required=True), - ip_version = dict(default='4', choices=['4', '6']), - enable_dhcp = dict(default='true', type='bool'), - gateway_ip = dict(default=None), - dns_nameservers = dict(default=None), - allocation_pool_start = dict(default=None), - allocation_pool_end = dict(default=None), - ) - module_kwargs = openstack_module_kwargs() - module = AnsibleModule(argument_spec, **module_kwargs) - - if not HAS_SHADE: - module.fail_json(msg='shade is required for this module') - - try: - cloud = shade.openstack_cloud(**module.params) - neutron = cloud.neutron_client - if module.params['state'] == 'present': - subnet_id = _get_subnet_id(module, neutron) - if not subnet_id: - subnet_id = _create_subnet(module, neutron) - module.exit_json(changed = True, result = "Created" , id = subnet_id) - else: - module.exit_json(changed = False, result = "success" , 
id = subnet_id) - else: - subnet_id = _get_subnet_id(module, neutron) - if not subnet_id: - module.exit_json(changed = False, result = "success") - else: - _delete_subnet(module, neutron, subnet_id) - module.exit_json(changed = True, result = "deleted") - except shade.OpenStackCloudException as e: - module.fail_json(msg=e.message) - - -# this is magic, see lib/ansible/module_common.py -from ansible.module_utils.basic import * -from ansible.module_utils.openstack import * -main() - diff --git a/cloud/openstack/os_subnet.py b/cloud/openstack/os_subnet.py index 75bf7b33313..ad3a27c2816 100644 --- a/cloud/openstack/os_subnet.py +++ b/cloud/openstack/os_subnet.py @@ -28,7 +28,7 @@ DOCUMENTATION = ''' module: os_subnet short_description: Add/Remove subnet to an OpenStack network extends_documentation_fragment: openstack -version_added: "2.0" +version_added: "1.10" description: - Add or Remove a subnet to an OpenStack network options: @@ -36,23 +36,21 @@ options: description: - Indicate desired state of the resource choices: ['present', 'absent'] - required: false default: present network_name: description: - Name of the network to which the subnet should be attached - required: true when state is 'present' + required: true + default: None name: description: - - The name of the subnet that should be created. Although Neutron - allows for non-unique subnet names, this module enforces subnet - name uniqueness. + - The name of the subnet that should be created required: true + default: None cidr: description: - - The CIDR representation of the subnet that should be assigned to - the subnet. - required: true when state is 'present' + - The CIDR representation of the subnet that should be assigned to the subnet + required: true default: None ip_version: description: @@ -71,181 +69,139 @@ options: default: None dns_nameservers: description: - - List of DNS nameservers for this subnet. 
+ - DNS nameservers for this subnet, comma-separated required: false default: None + version_added: "1.4" allocation_pool_start: description: - - From the subnet pool the starting address from which the IP should - be allocated. + - From the subnet pool the starting address from which the IP should be allocated required: false default: None allocation_pool_end: description: - - From the subnet pool the last IP that should be assigned to the - virtual machines. + - From the subnet pool the last IP that should be assigned to the virtual machines required: false default: None - host_routes: - description: - - A list of host route dictionaries for the subnet. - required: false - default: None -requirements: - - "python >= 2.6" - - "shade" +requirements: ["shade"] ''' EXAMPLES = ''' -# Create a new (or update an existing) subnet on the specified network -- os_subnet: - state=present - network_name=network1 - name=net1subnet - cidr=192.168.0.0/24 - dns_nameservers: - - 8.8.8.7 - - 8.8.8.8 - host_routes: - - destination: 0.0.0.0/0 - nexthop: 123.456.78.9 - - destination: 192.168.0.0/24 - nexthop: 192.168.0.1 - -# Delete a subnet -- os_subnet: - state=absent - name=net1subnet +# Create a subnet with the specified network +- os_subnet: state=present username=admin password=admin + project_name=admin + network_name=network1 name=net1subnet cidr=192.168.0.0/24" ''' +_os_network_id = None -def _needs_update(subnet, module): - """Check for differences in the updatable values.""" - enable_dhcp = module.params['enable_dhcp'] - subnet_name = module.params['name'] - pool_start = module.params['allocation_pool_start'] - pool_end = module.params['allocation_pool_end'] - gateway_ip = module.params['gateway_ip'] - dns = module.params['dns_nameservers'] - host_routes = module.params['host_routes'] - curr_pool = subnet['allocation_pools'][0] - - if subnet['enable_dhcp'] != enable_dhcp: - return True - if subnet_name and subnet['name'] != subnet_name: - return True - if pool_start and 
curr_pool['start'] != pool_start: - return True - if pool_end and curr_pool['end'] != pool_end: - return True - if gateway_ip and subnet['gateway_ip'] != gateway_ip: - return True - if dns and sorted(subnet['dns_nameservers']) != sorted(dns): - return True - if host_routes: - curr_hr = sorted(subnet['host_routes'], key=lambda t: t.keys()) - new_hr = sorted(host_routes, key=lambda t: t.keys()) - if sorted(curr_hr) != sorted(new_hr): - return True - return False +def _get_net_id(neutron, module): + kwargs = { + 'name': module.params['network_name'], + } + try: + networks = neutron.list_networks(**kwargs) + except Exception, e: + module.fail_json("Error in listing neutron networks: %s" % e.message) + if not networks['networks']: + return None + return networks['networks'][0]['id'] + + +def _get_subnet_id(module, neutron): + global _os_network_id + subnet_id = None + _os_network_id = _get_net_id(neutron, module) + if not _os_network_id: + module.fail_json(msg = "network id of network not found.") + else: + kwargs = { + 'name': module.params['name'], + } + try: + subnets = neutron.list_subnets(**kwargs) + except Exception, e: + module.fail_json( msg = " Error in getting the subnet list:%s " % e.message) + if not subnets['subnets']: + return None + return subnets['subnets'][0]['id'] + +def _create_subnet(module, neutron): + neutron.format = 'json' + subnet = { + 'name': module.params['name'], + 'ip_version': module.params['ip_version'], + 'enable_dhcp': module.params['enable_dhcp'], + 'gateway_ip': module.params['gateway_ip'], + 'dns_nameservers': module.params['dns_nameservers'], + 'network_id': _os_network_id, + 'cidr': module.params['cidr'], + } + if module.params['allocation_pool_start'] and module.params['allocation_pool_end']: + allocation_pools = [ + { + 'start' : module.params['allocation_pool_start'], + 'end' : module.params['allocation_pool_end'] + } + ] + subnet.update({'allocation_pools': allocation_pools}) + if not module.params['gateway_ip']: + 
subnet.pop('gateway_ip') + if module.params['dns_nameservers']: + subnet['dns_nameservers'] = module.params['dns_nameservers'].split(',') + else: + subnet.pop('dns_nameservers') + try: + new_subnet = neutron.create_subnet(dict(subnet=subnet)) + except Exception, e: + module.fail_json(msg = "Failure in creating subnet: %s" % e.message) + return new_subnet['subnet']['id'] -def _system_state_change(module, subnet): - state = module.params['state'] - if state == 'present': - if not subnet: - return True - return _needs_update(subnet, module) - if state == 'absent' and subnet: - return True - return False +def _delete_subnet(module, neutron, subnet_id): + try: + neutron.delete_subnet(subnet_id) + except Exception, e: + module.fail_json( msg = "Error in deleting subnet: %s" % e.message) + return True def main(): + argument_spec = openstack_full_argument_spec( - name=dict(required=True), - network_name=dict(default=None), - cidr=dict(default=None), - ip_version=dict(default='4', choices=['4', '6']), - enable_dhcp=dict(default='true', type='bool'), - gateway_ip=dict(default=None), - dns_nameservers=dict(default=None, type='list'), - allocation_pool_start=dict(default=None), - allocation_pool_end=dict(default=None), - host_routes=dict(default=None, type='list'), + name = dict(required=True), + network_name = dict(required=True), + cidr = dict(required=True), + ip_version = dict(default='4', choices=['4', '6']), + enable_dhcp = dict(default='true', type='bool'), + gateway_ip = dict(default=None), + dns_nameservers = dict(default=None), + allocation_pool_start = dict(default=None), + allocation_pool_end = dict(default=None), ) - module_kwargs = openstack_module_kwargs() - module = AnsibleModule(argument_spec, - supports_check_mode=True, - **module_kwargs) + module = AnsibleModule(argument_spec, **module_kwargs) if not HAS_SHADE: module.fail_json(msg='shade is required for this module') - state = module.params['state'] - network_name = module.params['network_name'] - cidr = 
module.params['cidr'] - ip_version = module.params['ip_version'] - enable_dhcp = module.params['enable_dhcp'] - subnet_name = module.params['name'] - gateway_ip = module.params['gateway_ip'] - dns = module.params['dns_nameservers'] - pool_start = module.params['allocation_pool_start'] - pool_end = module.params['allocation_pool_end'] - host_routes = module.params['host_routes'] - - # Check for required parameters when state == 'present' - if state == 'present': - for p in ['network_name', 'cidr']: - if not module.params[p]: - module.fail_json(msg='%s required with present state' % p) - - if pool_start and pool_end: - pool = [dict(start=pool_start, end=pool_end)] - elif pool_start or pool_end: - module.fail_json(msg='allocation pool requires start and end values') - else: - pool = None - try: cloud = shade.openstack_cloud(**module.params) - subnet = cloud.get_subnet(subnet_name) - - if module.check_mode: - module.exit_json(changed=_system_state_change(module, subnet)) - - if state == 'present': - if not subnet: - subnet = cloud.create_subnet(network_name, cidr, - ip_version=ip_version, - enable_dhcp=enable_dhcp, - subnet_name=subnet_name, - gateway_ip=gateway_ip, - dns_nameservers=dns, - allocation_pools=pool, - host_routes=host_routes) - module.exit_json(changed=True, result="created") + neutron = cloud.neutron_client + if module.params['state'] == 'present': + subnet_id = _get_subnet_id(module, neutron) + if not subnet_id: + subnet_id = _create_subnet(module, neutron) + module.exit_json(changed = True, result = "Created" , id = subnet_id) else: - if _needs_update(subnet, module): - cloud.update_subnet(subnet['id'], - subnet_name=subnet_name, - enable_dhcp=enable_dhcp, - gateway_ip=gateway_ip, - dns_nameservers=dns, - allocation_pools=pool, - host_routes=host_routes) - module.exit_json(changed=True, result="updated") - else: - module.exit_json(changed=False, result="success") - - elif state == 'absent': - if not subnet: - module.exit_json(changed=False, 
result="success") + module.exit_json(changed = False, result = "success" , id = subnet_id) + else: + subnet_id = _get_subnet_id(module, neutron) + if not subnet_id: + module.exit_json(changed = False, result = "success") else: - cloud.delete_subnet(subnet_name) - module.exit_json(changed=True, result="deleted") - + _delete_subnet(module, neutron, subnet_id) + module.exit_json(changed = True, result = "deleted") except shade.OpenStackCloudException as e: module.fail_json(msg=e.message) @@ -253,5 +209,5 @@ def main(): # this is magic, see lib/ansible/module_common.py from ansible.module_utils.basic import * from ansible.module_utils.openstack import * -if __name__ == '__main__': - main() +main() + From 3692518643ed040382282520d9a578ff43d228b7 Mon Sep 17 00:00:00 2001 From: David Shrewsbury Date: Thu, 16 Apr 2015 09:19:29 -0400 Subject: [PATCH 142/464] Update os_subnet module for latest shade Shade can now handle creating, updating and deleting subnets. This cleans up the module to take advantage of that. --- cloud/openstack/os_subnet.py | 259 ++++++++++++++++++++--------------- 1 file changed, 149 insertions(+), 110 deletions(-) diff --git a/cloud/openstack/os_subnet.py b/cloud/openstack/os_subnet.py index ad3a27c2816..e6f02816e1d 100644 --- a/cloud/openstack/os_subnet.py +++ b/cloud/openstack/os_subnet.py @@ -40,18 +40,18 @@ options: network_name: description: - Name of the network to which the subnet should be attached - required: true - default: None + required: true when state is 'present' name: description: - - The name of the subnet that should be created + - The name of the subnet that should be created. Although Neutron + allows for non-unique subnet names, this module enforces subnet + name uniqueness. required: true - default: None cidr: description: - - The CIDR representation of the subnet that should be assigned to the subnet - required: true - default: None + - The CIDR representation of the subnet that should be assigned to + the subnet. 
+ required: true when state is 'present' ip_version: description: - The IP version of the subnet 4 or 6 @@ -69,139 +69,179 @@ options: default: None dns_nameservers: description: - - DNS nameservers for this subnet, comma-separated + - List of DNS nameservers for this subnet. required: false default: None - version_added: "1.4" allocation_pool_start: description: - - From the subnet pool the starting address from which the IP should be allocated + - From the subnet pool the starting address from which the IP should + be allocated. required: false default: None allocation_pool_end: description: - - From the subnet pool the last IP that should be assigned to the virtual machines + - From the subnet pool the last IP that should be assigned to the + virtual machines. + required: false + default: None + host_routes: + description: + - A list of host route dictionaries for the subnet. required: false default: None requirements: ["shade"] ''' EXAMPLES = ''' -# Create a subnet with the specified network -- os_subnet: state=present username=admin password=admin - project_name=admin - network_name=network1 name=net1subnet cidr=192.168.0.0/24" +# Create a new (or update an existing) subnet on the specified network +- os_subnet: + state=present + network_name=network1 + name=net1subnet + cidr=192.168.0.0/24 + dns_nameservers: + - 8.8.8.7 + - 8.8.8.8 + host_routes: + - destination: 0.0.0.0/0 + nexthop: 123.456.78.9 + - destination: 192.168.0.0/24 + nexthop: 192.168.0.1 + +# Delete a subnet +- os_subnet: + state=absent + name=net1subnet ''' -_os_network_id = None - -def _get_net_id(neutron, module): - kwargs = { - 'name': module.params['network_name'], - } - try: - networks = neutron.list_networks(**kwargs) - except Exception, e: - module.fail_json("Error in listing neutron networks: %s" % e.message) - if not networks['networks']: - return None - return networks['networks'][0]['id'] - - -def _get_subnet_id(module, neutron): - global _os_network_id - subnet_id = None - 
_os_network_id = _get_net_id(neutron, module) - if not _os_network_id: - module.fail_json(msg = "network id of network not found.") - else: - kwargs = { - 'name': module.params['name'], - } - try: - subnets = neutron.list_subnets(**kwargs) - except Exception, e: - module.fail_json( msg = " Error in getting the subnet list:%s " % e.message) - if not subnets['subnets']: - return None - return subnets['subnets'][0]['id'] - -def _create_subnet(module, neutron): - neutron.format = 'json' - subnet = { - 'name': module.params['name'], - 'ip_version': module.params['ip_version'], - 'enable_dhcp': module.params['enable_dhcp'], - 'gateway_ip': module.params['gateway_ip'], - 'dns_nameservers': module.params['dns_nameservers'], - 'network_id': _os_network_id, - 'cidr': module.params['cidr'], - } - if module.params['allocation_pool_start'] and module.params['allocation_pool_end']: - allocation_pools = [ - { - 'start' : module.params['allocation_pool_start'], - 'end' : module.params['allocation_pool_end'] - } - ] - subnet.update({'allocation_pools': allocation_pools}) - if not module.params['gateway_ip']: - subnet.pop('gateway_ip') - if module.params['dns_nameservers']: - subnet['dns_nameservers'] = module.params['dns_nameservers'].split(',') - else: - subnet.pop('dns_nameservers') - try: - new_subnet = neutron.create_subnet(dict(subnet=subnet)) - except Exception, e: - module.fail_json(msg = "Failure in creating subnet: %s" % e.message) - return new_subnet['subnet']['id'] - -def _delete_subnet(module, neutron, subnet_id): - try: - neutron.delete_subnet(subnet_id) - except Exception, e: - module.fail_json( msg = "Error in deleting subnet: %s" % e.message) - return True +def _needs_update(subnet, module): + """Check for differences in the updatable values.""" + enable_dhcp = module.params['enable_dhcp'] + subnet_name = module.params['name'] + pool_start = module.params['allocation_pool_start'] + pool_end = module.params['allocation_pool_end'] + gateway_ip = 
module.params['gateway_ip'] + dns = module.params['dns_nameservers'] + host_routes = module.params['host_routes'] + curr_pool = subnet['allocation_pools'][0] + + if subnet['enable_dhcp'] != enable_dhcp: + return True + if subnet_name and subnet['name'] != subnet_name: + return True + if pool_start and curr_pool['start'] != pool_start: + return True + if pool_end and curr_pool['end'] != pool_end: + return True + if gateway_ip and subnet['gateway_ip'] != gateway_ip: + return True + if dns and sorted(subnet['dns_nameservers']) != sorted(dns): + return True + if host_routes: + curr_hr = sorted(subnet['host_routes'], key=lambda t: t.keys()) + new_hr = sorted(host_routes, key=lambda t: t.keys()) + if sorted(curr_hr) != sorted(new_hr): + return True + return False + + +def _system_state_change(module, subnet): + state = module.params['state'] + if state == 'present': + if not subnet: + return True + return _needs_update(subnet, module) + if state == 'absent' and subnet: + return True + return False def main(): - argument_spec = openstack_full_argument_spec( - name = dict(required=True), - network_name = dict(required=True), - cidr = dict(required=True), - ip_version = dict(default='4', choices=['4', '6']), - enable_dhcp = dict(default='true', type='bool'), - gateway_ip = dict(default=None), - dns_nameservers = dict(default=None), - allocation_pool_start = dict(default=None), - allocation_pool_end = dict(default=None), + name=dict(required=True), + network_name=dict(default=None), + cidr=dict(default=None), + ip_version=dict(default='4', choices=['4', '6']), + enable_dhcp=dict(default='true', type='bool'), + gateway_ip=dict(default=None), + dns_nameservers=dict(default=None, type='list'), + allocation_pool_start=dict(default=None), + allocation_pool_end=dict(default=None), + host_routes=dict(default=None, type='list'), ) + module_kwargs = openstack_module_kwargs() - module = AnsibleModule(argument_spec, **module_kwargs) + module = AnsibleModule(argument_spec, + 
supports_check_mode=True, + **module_kwargs) if not HAS_SHADE: module.fail_json(msg='shade is required for this module') + state = module.params['state'] + network_name = module.params['network_name'] + cidr = module.params['cidr'] + ip_version = module.params['ip_version'] + enable_dhcp = module.params['enable_dhcp'] + subnet_name = module.params['name'] + gateway_ip = module.params['gateway_ip'] + dns = module.params['dns_nameservers'] + pool_start = module.params['allocation_pool_start'] + pool_end = module.params['allocation_pool_end'] + host_routes = module.params['host_routes'] + + # Check for required parameters when state == 'present' + if state == 'present': + for p in ['network_name', 'cidr']: + if not module.params[p]: + module.fail_json(msg='%s required with present state' % p) + + if pool_start and pool_end: + pool = [dict(start=pool_start, end=pool_end)] + elif pool_start or pool_end: + module.fail_json(msg='allocation pool requires start and end values') + else: + pool = None + try: cloud = shade.openstack_cloud(**module.params) - neutron = cloud.neutron_client - if module.params['state'] == 'present': - subnet_id = _get_subnet_id(module, neutron) - if not subnet_id: - subnet_id = _create_subnet(module, neutron) - module.exit_json(changed = True, result = "Created" , id = subnet_id) + subnet = cloud.get_subnet(subnet_name) + + if module.check_mode: + module.exit_json(changed=_system_state_change(module, subnet)) + + if state == 'present': + if not subnet: + subnet = cloud.create_subnet(network_name, cidr, + ip_version=ip_version, + enable_dhcp=enable_dhcp, + subnet_name=subnet_name, + gateway_ip=gateway_ip, + dns_nameservers=dns, + allocation_pools=pool, + host_routes=host_routes) + module.exit_json(changed=True, result="created") else: - module.exit_json(changed = False, result = "success" , id = subnet_id) - else: - subnet_id = _get_subnet_id(module, neutron) - if not subnet_id: - module.exit_json(changed = False, result = "success") + if 
_needs_update(subnet, module): + cloud.update_subnet(subnet['id'], + subnet_name=subnet_name, + enable_dhcp=enable_dhcp, + gateway_ip=gateway_ip, + dns_nameservers=dns, + allocation_pools=pool, + host_routes=host_routes) + module.exit_json(changed=True, result="updated") + else: + module.exit_json(changed=False, result="success") + + elif state == 'absent': + if not subnet: + module.exit_json(changed=False, result="success") else: - _delete_subnet(module, neutron, subnet_id) - module.exit_json(changed = True, result = "deleted") + cloud.delete_subnet(subnet_name) + module.exit_json(changed=True, result="deleted") + except shade.OpenStackCloudException as e: module.fail_json(msg=e.message) @@ -210,4 +250,3 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.openstack import * main() - From 4348475ed334235c06f701d5b2eb29463e36c575 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 4 May 2015 11:06:02 -0400 Subject: [PATCH 143/464] doc fixes --- cloud/openstack/os_subnet.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cloud/openstack/os_subnet.py b/cloud/openstack/os_subnet.py index e6f02816e1d..d9a5c99119c 100644 --- a/cloud/openstack/os_subnet.py +++ b/cloud/openstack/os_subnet.py @@ -28,7 +28,7 @@ DOCUMENTATION = ''' module: os_subnet short_description: Add/Remove subnet to an OpenStack network extends_documentation_fragment: openstack -version_added: "1.10" +version_added: "2.0" description: - Add or Remove a subnet to an OpenStack network options: @@ -36,6 +36,7 @@ options: description: - Indicate desired state of the resource choices: ['present', 'absent'] + required: false default: present network_name: description: @@ -52,6 +53,7 @@ options: - The CIDR representation of the subnet that should be assigned to the subnet. 
required: true when state is 'present' + default: None ip_version: description: - The IP version of the subnet 4 or 6 From 167e7c2b81f535b8adee92bceb158d91438cb59e Mon Sep 17 00:00:00 2001 From: Will Thames Date: Wed, 8 Apr 2015 12:44:01 +1000 Subject: [PATCH 144/464] Perform privilege grants/revokes only when required Use `has_table_privileges` and `has_database_privileges` to test whether a user already has a privilege before granting it, or whether a user doesn't have a privilege before revoking it. --- database/postgresql/postgresql_user.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/database/postgresql/postgresql_user.py b/database/postgresql/postgresql_user.py index 98f234fc1db..a1d4da4b7af 100644 --- a/database/postgresql/postgresql_user.py +++ b/database/postgresql/postgresql_user.py @@ -431,6 +431,8 @@ def revoke_privileges(cursor, user, privs): check_funcs = dict(table=has_table_privileges, database=has_database_privileges) changed = False + revoke_funcs = dict(table=revoke_table_privilege, database=revoke_database_privilege) + check_funcs = dict(table=has_table_privilege, database=has_database_privilege) for type_ in privs: for name, privileges in privs[type_].iteritems(): # Check that any of the privileges requested to be removed are @@ -444,6 +446,8 @@ def revoke_privileges(cursor, user, privs): def grant_privileges(cursor, user, privs): if privs is None: return False + grant_funcs = dict(table=grant_table_privilege, database=grant_database_privilege) + check_funcs = dict(table=has_table_privilege, database=has_database_privilege) grant_funcs = dict(table=grant_table_privileges, database=grant_database_privileges) check_funcs = dict(table=has_table_privileges, database=has_database_privileges) From 9927e109b795b1c557e9320490d30b680c445816 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 5 May 2015 10:50:24 -0700 Subject: [PATCH 145/464] Add deprecated documentation for _quantum_subnet --- cloud/openstack/_quantum_subnet.py | 1 + 1 
file changed, 1 insertion(+) diff --git a/cloud/openstack/_quantum_subnet.py b/cloud/openstack/_quantum_subnet.py index 105ca32c582..557c9846d09 100644 --- a/cloud/openstack/_quantum_subnet.py +++ b/cloud/openstack/_quantum_subnet.py @@ -31,6 +31,7 @@ DOCUMENTATION = ''' module: quantum_subnet deprecated: Deprecated in 2.0. Use os_subnet instead version_added: "1.2" +deprecated: Deprecated in 2.0. Use os_subnet instead short_description: Add/remove subnet from a network description: - Add/remove subnet from a network From 145361aea39bb5b90f611077b9a2cd8a6f0d2ad4 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 5 May 2015 17:14:04 -0400 Subject: [PATCH 146/464] Revert "Add deprecated documentation for _quantum_subnet" cause it was already added This reverts commit 3fa32c4984e3fa839d1188871b2399f184e395a5. --- cloud/openstack/_quantum_subnet.py | 1 - 1 file changed, 1 deletion(-) diff --git a/cloud/openstack/_quantum_subnet.py b/cloud/openstack/_quantum_subnet.py index 557c9846d09..105ca32c582 100644 --- a/cloud/openstack/_quantum_subnet.py +++ b/cloud/openstack/_quantum_subnet.py @@ -31,7 +31,6 @@ DOCUMENTATION = ''' module: quantum_subnet deprecated: Deprecated in 2.0. Use os_subnet instead version_added: "1.2" -deprecated: Deprecated in 2.0. Use os_subnet instead short_description: Add/remove subnet from a network description: - Add/remove subnet from a network From 8c10cc20eccdd1c005a4aa30bbdd79eaf301cb9e Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 8 May 2015 10:40:26 -0700 Subject: [PATCH 147/464] Expand tildes and vars in the config file path --- database/mysql/mysql_user.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/database/mysql/mysql_user.py b/database/mysql/mysql_user.py index 286983b38d7..97150775507 100644 --- a/database/mysql/mysql_user.py +++ b/database/mysql/mysql_user.py @@ -183,7 +183,7 @@ class InvalidPrivsError(Exception): # MySQL module specific support methods. 
# -def connect(module, login_user=None, login_password=None, config_file='~/.my.cnf'): +def connect(module, login_user=None, login_password=None, config_file=''): config = { 'host': module.params['login_host'], 'db': 'mysql' From 2b5815efc9be09c761d048df46506dc677d0aec6 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Fri, 8 May 2015 13:42:06 -0500 Subject: [PATCH 148/464] Add compileall testing via travis to validate modules are python24 compatible --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index d1a48cb788e..0e3a2af23b3 100644 --- a/.travis.yml +++ b/.travis.yml @@ -10,7 +10,7 @@ addons: - python2.4 - python2.6 script: - - python2.4 -m compileall -fq -x 'cloud/|/accelerate.py' . + - python2.4 -m compileall -fq -x 'cloud/' . - python2.4 -m compileall -fq cloud/amazon/_ec2_ami_search.py cloud/amazon/ec2_facts.py - python2.6 -m compileall -fq . - python2.7 -m compileall -fq . From 55d48a7a8251b87990b5a24af67ca35645e3f340 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 11 May 2015 13:21:17 -0700 Subject: [PATCH 149/464] Finish up cleanups to modules: * Add python>= 2.6 to documented requirements when a module's deps need python>= 2.6 so we know when a module can use python2.6+ syntax * Remove BabyJSON usage * Change modules to use if __name__ == '__main__' so that they can potentially be unittested The BabJSON changes Fixes #1211 --- cloud/openstack/os_subnet.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/cloud/openstack/os_subnet.py b/cloud/openstack/os_subnet.py index d9a5c99119c..61152d75456 100644 --- a/cloud/openstack/os_subnet.py +++ b/cloud/openstack/os_subnet.py @@ -91,7 +91,9 @@ options: - A list of host route dictionaries for the subnet. 
required: false default: None -requirements: ["shade"] +requirements: + - "python >= 2.6" + - "shade" ''' EXAMPLES = ''' @@ -251,4 +253,5 @@ def main(): # this is magic, see lib/ansible/module_common.py from ansible.module_utils.basic import * from ansible.module_utils.openstack import * -main() +if __name__ '__main__': + main() From 3256db99e2eb6c6f1600450fc30cdbd38ce63e8f Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 12 May 2015 10:32:43 -0500 Subject: [PATCH 150/464] Fixing digital_ocean documentation and a bug in os_subnet --- cloud/openstack/os_subnet.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/openstack/os_subnet.py b/cloud/openstack/os_subnet.py index 61152d75456..75bf7b33313 100644 --- a/cloud/openstack/os_subnet.py +++ b/cloud/openstack/os_subnet.py @@ -253,5 +253,5 @@ def main(): # this is magic, see lib/ansible/module_common.py from ansible.module_utils.basic import * from ansible.module_utils.openstack import * -if __name__ '__main__': +if __name__ == '__main__': main() From d796e8f54fc1307035d86d38aa343343cd4eeec2 Mon Sep 17 00:00:00 2001 From: Feanil Patel Date: Wed, 13 May 2015 17:11:28 -0400 Subject: [PATCH 151/464] Tell me what the igw id is if we created an igw. --- cloud/amazon/ec2_vpc.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/cloud/amazon/ec2_vpc.py b/cloud/amazon/ec2_vpc.py index 0870c14ec59..087b13ab15e 100644 --- a/cloud/amazon/ec2_vpc.py +++ b/cloud/amazon/ec2_vpc.py @@ -182,6 +182,17 @@ def get_vpc_info(vpc): 'state': vpc.state, }) +def get_igw_info(igw): + """ + Get info about the internet gateway. 
+ """ + if igw is None: + return {} + + return ({ + 'id': igw.id, + }) + def find_vpc(module, vpc_conn, vpc_id=None, cidr=None): """ Finds a VPC that matches a specific id or cidr + tags From 4e206e3790d7239c1a909a921cc3f22253b1470b Mon Sep 17 00:00:00 2001 From: Benno Joy Date: Thu, 14 May 2015 09:03:50 +0530 Subject: [PATCH 152/464] Fixes issue 1197 for s3 module, where the file gets downloaded even if checksums match --- cloud/amazon/s3.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/cloud/amazon/s3.py b/cloud/amazon/s3.py index 50ef9d03f54..5c55fae264b 100644 --- a/cloud/amazon/s3.py +++ b/cloud/amazon/s3.py @@ -331,6 +331,12 @@ def main(): s3_url = module.params.get('s3_url') src = module.params.get('src') + if overwrite not in ['always', 'never', 'different']: + if module.boolean(overwrite): + overwrite = 'always' + else: + overwrite='never' + if overwrite not in ['always', 'never', 'different']: if module.boolean(overwrite): overwrite = 'always' From fbaeee5dbac1b4ab903ac5a2dc95babb09259d91 Mon Sep 17 00:00:00 2001 From: Feanil Patel Date: Fri, 15 May 2015 09:40:30 -0400 Subject: [PATCH 153/464] Output the Internet Gatewoy id directly instead of in a dict. --- cloud/amazon/ec2_vpc.py | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/cloud/amazon/ec2_vpc.py b/cloud/amazon/ec2_vpc.py index 087b13ab15e..0870c14ec59 100644 --- a/cloud/amazon/ec2_vpc.py +++ b/cloud/amazon/ec2_vpc.py @@ -182,17 +182,6 @@ def get_vpc_info(vpc): 'state': vpc.state, }) -def get_igw_info(igw): - """ - Get info about the internet gateway. 
- """ - if igw is None: - return {} - - return ({ - 'id': igw.id, - }) - def find_vpc(module, vpc_conn, vpc_id=None, cidr=None): """ Finds a VPC that matches a specific id or cidr + tags From 823adbbe3685c083a0180da913fcd1750a6c07ed Mon Sep 17 00:00:00 2001 From: jaypei Date: Wed, 29 Apr 2015 19:59:55 +0800 Subject: [PATCH 154/464] use the right way to unescape line string Reference https://github.com/ansible/ansible/issues/10864 --- files/lineinfile.py | 9 --------- 1 file changed, 9 deletions(-) diff --git a/files/lineinfile.py b/files/lineinfile.py index fd589b03e8d..e66bdc01131 100644 --- a/files/lineinfile.py +++ b/files/lineinfile.py @@ -22,7 +22,6 @@ import re import os import pipes -import codecs import tempfile DOCUMENTATION = """ @@ -370,14 +369,6 @@ def main(): line = params['line'] - # Replace escape sequences like '\n' while being sure - # not to replace octal escape sequences (\ooo) since they - # match the backref syntax. - if backrefs: - line = re.sub(r'(\\[0-9]{1,3})', r'\\\1', line) - - line = codecs.escape_decode(line)[0] - present(module, dest, params['regexp'], line, ins_aft, ins_bef, create, backup, backrefs) else: From 98061a8f33d6f94ad42cac08a66b4bf3e01b5199 Mon Sep 17 00:00:00 2001 From: Tristan Fisher Date: Fri, 15 May 2015 17:09:54 -0400 Subject: [PATCH 155/464] standardizes bools in argument_spec --- files/file.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/file.py b/files/file.py index 329fe1e0263..65d6b01d233 100644 --- a/files/file.py +++ b/files/file.py @@ -155,7 +155,7 @@ def main(): recurse = dict(default=False, type='bool'), force = dict(required=False, default=False, type='bool'), diff_peek = dict(default=None), - validate = dict(required=False, default=None), + validate = dict(required=False, default=None, type='bool'), src = dict(required=False, default=None), ), add_file_common_args=True, From fa9d2f56dfbeb7ffb3a6ab5f89c7a05c756d8b59 Mon Sep 17 00:00:00 2001 From: Tristan Fisher Date: Mon, 18 May 
2015 18:44:31 -0400 Subject: [PATCH 156/464] removes bool type from validate. --- files/file.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/file.py b/files/file.py index 65d6b01d233..329fe1e0263 100644 --- a/files/file.py +++ b/files/file.py @@ -155,7 +155,7 @@ def main(): recurse = dict(default=False, type='bool'), force = dict(required=False, default=False, type='bool'), diff_peek = dict(default=None), - validate = dict(required=False, default=None, type='bool'), + validate = dict(required=False, default=None), src = dict(required=False, default=None), ), add_file_common_args=True, From c700993dd53316f662b6aa56ec8cfeead185b9b9 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 19 May 2015 12:41:48 -0700 Subject: [PATCH 157/464] Fix a problem introduced with #1101 and optimize privilege handling * If a db user belonged to a role which had a privilege, the user would not have the privilege added as the role gave the appearance that the user already had it. Fixed to always check the privileges specific to the user. * Make fewer db queries to determine if privileges need to be changed and change them (was four for each privilege. Now two for each object that has a set of privileges changed). 
--- database/postgresql/postgresql_user.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/database/postgresql/postgresql_user.py b/database/postgresql/postgresql_user.py index a1d4da4b7af..2998ab273f9 100644 --- a/database/postgresql/postgresql_user.py +++ b/database/postgresql/postgresql_user.py @@ -431,8 +431,6 @@ def revoke_privileges(cursor, user, privs): check_funcs = dict(table=has_table_privileges, database=has_database_privileges) changed = False - revoke_funcs = dict(table=revoke_table_privilege, database=revoke_database_privilege) - check_funcs = dict(table=has_table_privilege, database=has_database_privilege) for type_ in privs: for name, privileges in privs[type_].iteritems(): # Check that any of the privileges requested to be removed are @@ -446,8 +444,9 @@ def revoke_privileges(cursor, user, privs): def grant_privileges(cursor, user, privs): if privs is None: return False - grant_funcs = dict(table=grant_table_privilege, database=grant_database_privilege) - check_funcs = dict(table=has_table_privilege, database=has_database_privilege) + + grant_funcs = dict(table=grant_table_privileges, database=grant_database_privileges) + check_funcs = dict(table=has_table_privileges, database=has_database_privileges) grant_funcs = dict(table=grant_table_privileges, database=grant_database_privileges) check_funcs = dict(table=has_table_privileges, database=has_database_privileges) From b138411671194e3ec236d8ec3d27bcf32447350d Mon Sep 17 00:00:00 2001 From: tedder Date: Mon, 29 Dec 2014 16:38:08 -0800 Subject: [PATCH 159/464] feature pull request: catch and retry recoverable errors boto can throw SSLError when timeouts occur (among other SSL errors). Catch these so proper JSON can be returned, and also add the ability to retry the operation. There's an open issue in boto for this: https://github.com/boto/boto/issues/2409 Here's a sample stacktrace that inspired me to work on this. 
I'm on 1.7, but there's no meaningful differences in the 1.8 release that would affect this. I've added line breaks to the trace for readability. failed to parse: Traceback (most recent call last): File "/home/ubuntu/.ansible/tmp/ansible-tmp-1419895753.17-160808281985012/s3", line 2031, in main() File "/home/ubuntu/.ansible/tmp/ansible-tmp-1419895753.17-160808281985012/s3", line 353, in main download_s3file(module, s3, bucket, obj, dest) File "/home/ubuntu/.ansible/tmp/ansible-tmp-1419895753.17-160808281985012/s3", line 234, in download_s3file key.get_contents_to_filename(dest) File "/usr/local/lib/python2.7/dist-packages/boto/s3/key.py", line 1665, in get_contents_to_filename response_headers=response_headers) File "/usr/local/lib/python2.7/dist-packages/boto/s3/key.py", line 1603, in get_contents_to_file response_headers=response_headers) File "/usr/local/lib/python2.7/dist-packages/boto/s3/key.py", line 1435, in get_file query_args=None) File "/usr/local/lib/python2.7/dist-packages/boto/s3/key.py", line 1488, in _get_file_internal for bytes in self: File "/usr/local/lib/python2.7/dist-packages/boto/s3/key.py", line 368, in next data = self.resp.read(self.BufferSize) File "/usr/local/lib/python2.7/dist-packages/boto/connection.py", line 416, in read return httplib.HTTPResponse.read(self, amt) File "/usr/lib/python2.7/httplib.py", line 567, in read s = self.fp.read(amt) File "/usr/lib/python2.7/socket.py", line 380, in read data = self._sock.recv(left) File "/usr/lib/python2.7/ssl.py", line 341, in recv return self.read(buflen) File "/usr/lib/python2.7/ssl.py", line 260, in read return self._sslobj.read(len) ssl.SSLError: The read operation timed out --- cloud/amazon/s3.py | 1 - 1 file changed, 1 deletion(-) diff --git a/cloud/amazon/s3.py b/cloud/amazon/s3.py index 5c55fae264b..6f8e447397d 100644 --- a/cloud/amazon/s3.py +++ b/cloud/amazon/s3.py @@ -430,7 +430,6 @@ def main(): # At this point explicitly define the overwrite condition. 
if sum_matches is True and pathrtn is True and overwrite == 'always': - download_s3file(module, s3, bucket, obj, dest) download_s3file(module, s3, bucket, obj, dest, retries) # If sum does not match but the destination exists, we From a2630d40fb2239bf80509dc3843df9787258908e Mon Sep 17 00:00:00 2001 From: Ryan Walls Date: Tue, 2 Jun 2015 00:09:01 -0600 Subject: [PATCH 160/464] Fix typo in Docker module. --- cloud/docker/docker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 977969da03f..c4f8e3e9f0b 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -1129,7 +1129,7 @@ class DockerManager(object): # LOG_DRIVER - if self.ensure_capability('log_driver', false) : + if self.ensure_capability('log_driver', False) : expected_log_driver = self.module.params.get('log_driver') or 'json-file' actual_log_driver = container['HostConfig']['LogConfig']['Type'] if actual_log_driver != expected_log_driver: From 61741b60c6ff9f2fe468d2a4cc5be9036c84dd0a Mon Sep 17 00:00:00 2001 From: billwanjohi Date: Thu, 28 May 2015 17:26:58 +0000 Subject: [PATCH 161/464] cloudformation: accept local templates in yaml format Since the YAML data format is a subset of JSON, it is trivial to convert the former to the latter. This means that we can use YAML templates to build cloudformation stacks, as long as we translate them before passing them to the AWS API. I figure this could potentially be quite popular in the Ansible world, since we already use so much YAML for our playbooks. --- cloud/amazon/cloudformation.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/cloud/amazon/cloudformation.py b/cloud/amazon/cloudformation.py index eb49f66805c..1718ef142b1 100644 --- a/cloud/amazon/cloudformation.py +++ b/cloud/amazon/cloudformation.py @@ -81,6 +81,12 @@ options: - Location of file containing the template body. 
The URL must point to a template (max size 307,200 bytes) located in an S3 bucket in the same region as the stack. This parameter is mutually exclusive with 'template'. Either one of them is required if "state" parameter is "present" required: false version_added: "2.0" + template_format: + description: For local templates, allows specification of json or yaml format + default: json + choices: [ json, yaml ] + required: false + version_added: "2.0" author: James S. Martin extends_documentation_fragment: aws @@ -127,6 +133,7 @@ EXAMPLES = ''' import json import time +import yaml try: import boto @@ -224,6 +231,7 @@ def main(): stack_policy=dict(default=None, required=False), disable_rollback=dict(default=False, type='bool'), template_url=dict(default=None, required=False), + template_format=dict(default='json', choices=['json', 'yaml'], required=False), tags=dict(default=None) ) ) @@ -250,6 +258,12 @@ def main(): else: template_body = None + if module.params['template_format'] == 'yaml': + if template_body is None: + module.fail_json(msg='yaml format only supported for local templates') + else: + template_body = json.dumps(yaml.load(template_body), indent=2) + if module.params['stack_policy'] is not None: stack_policy_body = open(module.params['stack_policy'], 'r').read() else: From f9f17b5e047660742a8014f908551462aef7270e Mon Sep 17 00:00:00 2001 From: Sankalp Khare Date: Thu, 4 Jun 2015 03:40:49 +0530 Subject: [PATCH 162/464] Fixed a typo in ec2_vpc module documentation --- cloud/amazon/ec2_vpc.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) mode change 100644 => 100755 cloud/amazon/ec2_vpc.py diff --git a/cloud/amazon/ec2_vpc.py b/cloud/amazon/ec2_vpc.py old mode 100644 new mode 100755 index 0870c14ec59..e34ea3163b2 --- a/cloud/amazon/ec2_vpc.py +++ b/cloud/amazon/ec2_vpc.py @@ -58,7 +58,7 @@ options: aliases: [] resource_tags: description: - - 'A dictionary array of resource tags of the form: { tag1: value1, tag2: value2 }. 
Tags in this list are used in conjunction with CIDR block to uniquely identify a VPC in lieu of vpc_id. Therefore, if CIDR/Tag combination does not exits, a new VPC will be created. VPC tags not on this list will be ignored. Prior to 1.7, specifying a resource tag was optional.' + - 'A dictionary array of resource tags of the form: { tag1: value1, tag2: value2 }. Tags in this list are used in conjunction with CIDR block to uniquely identify a VPC in lieu of vpc_id. Therefore, if CIDR/Tag combination does not exist, a new VPC will be created. VPC tags not on this list will be ignored. Prior to 1.7, specifying a resource tag was optional.' required: true default: null aliases: [] From ed5ea7e921c0a8a776b2cf42ef86b3e871d643f8 Mon Sep 17 00:00:00 2001 From: "Ching Yi, Chan" Date: Thu, 4 Jun 2015 14:28:57 +0800 Subject: [PATCH 163/464] Prevent memory-error from a large file --- cloud/amazon/s3.py | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/cloud/amazon/s3.py b/cloud/amazon/s3.py index 6f8e447397d..545955e90cd 100644 --- a/cloud/amazon/s3.py +++ b/cloud/amazon/s3.py @@ -296,6 +296,15 @@ def is_walrus(s3_url): else: return False +def get_md5_digest(local_file): + md5 = hashlib.md5() + with open(local_file, 'rb') as f: + while True: + data = f.read(1024 ** 2) + if not data: break + md5.update(data) + return md5.hexdigest() + def main(): argument_spec = ec2_argument_spec() @@ -410,7 +419,7 @@ def main(): # Compare the remote MD5 sum of the object with the local dest md5sum, if it already exists. if pathrtn is True: md5_remote = keysum(module, s3, bucket, obj) - md5_local = hashlib.md5(open(dest, 'rb').read()).hexdigest() + md5_local = get_md5_digest(dest) if md5_local == md5_remote: sum_matches = True if overwrite == 'always': @@ -454,7 +463,8 @@ def main(): # Lets check key state. Does it exist and if it does, compute the etag md5sum. 
if bucketrtn is True and keyrtn is True: md5_remote = keysum(module, s3, bucket, obj) - md5_local = hashlib.md5(open(src, 'rb').read()).hexdigest() + md5_local = get_md5_digest(src) + if md5_local == md5_remote: sum_matches = True if overwrite == 'always': From 9e56b42574fbfe02258569bc496d484e02fc9a74 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Thu, 4 Jun 2015 11:42:39 -0400 Subject: [PATCH 164/464] AnsibleModule takes a dict as a param --- cloud/openstack/os_client_config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/openstack/os_client_config.py b/cloud/openstack/os_client_config.py index 09a9e713210..281bad49621 100644 --- a/cloud/openstack/os_client_config.py +++ b/cloud/openstack/os_client_config.py @@ -38,7 +38,7 @@ EXAMPLES = ''' def main(): - module = AnsibleModule() + module = AnsibleModule({}) p = module.params try: From 1b5b1cbfe8cb24001c972070fbaf73e5c71705b2 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Thu, 4 Jun 2015 12:08:27 -0400 Subject: [PATCH 165/464] Add missing state parameter --- cloud/openstack/os_network.py | 1 + cloud/openstack/os_subnet.py | 1 + 2 files changed, 2 insertions(+) diff --git a/cloud/openstack/os_network.py b/cloud/openstack/os_network.py index b77a7e331a4..18e0aaa9a27 100644 --- a/cloud/openstack/os_network.py +++ b/cloud/openstack/os_network.py @@ -66,6 +66,7 @@ def main(): name=dict(required=True), shared=dict(default=False, type='bool'), admin_state_up=dict(default=True, type='bool'), + state=dict(default='present', choices=['absent', 'present']), ) module_kwargs = openstack_module_kwargs() diff --git a/cloud/openstack/os_subnet.py b/cloud/openstack/os_subnet.py index 75bf7b33313..2fdb4e0dd6d 100644 --- a/cloud/openstack/os_subnet.py +++ b/cloud/openstack/os_subnet.py @@ -173,6 +173,7 @@ def main(): allocation_pool_start=dict(default=None), allocation_pool_end=dict(default=None), host_routes=dict(default=None, type='list'), + state=dict(default='present', choices=['absent', 
'present']), ) module_kwargs = openstack_module_kwargs() From 1e14e51150c88495a6b30f99e6b09e426a49827c Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Tue, 31 Mar 2015 16:37:07 -0400 Subject: [PATCH 166/464] Add OpenStack Security Group support Two modules - one for security groups and one to manage rules in a security group. --- cloud/openstack/os_security_group.py | 113 ++++++++++++++++ cloud/openstack/os_security_group_rule.py | 156 ++++++++++++++++++++++ 2 files changed, 269 insertions(+) create mode 100644 cloud/openstack/os_security_group.py create mode 100644 cloud/openstack/os_security_group_rule.py diff --git a/cloud/openstack/os_security_group.py b/cloud/openstack/os_security_group.py new file mode 100644 index 00000000000..193a156251a --- /dev/null +++ b/cloud/openstack/os_security_group.py @@ -0,0 +1,113 @@ +#!/usr/bin/python + +# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. +# Copyright (c) 2013, Benno Joy +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . + +try: + import shade +except ImportError: + print("failed=True msg='shade is required for this module'") + + +DOCUMENTATION = ''' +--- +module: os_security_group +short_description: Add/Delete security groups from an OpenStack cloud. +extends_documentation_fragment: openstack +version_added: "2.0" +description: + - Add or Remove security groups from an OpenStack cloud. 
+options: + name: + description: + - Name that has to be given to the security group + required: true + description: + description: + - Long description of the purpose of the security group + required: false + default: None + state: + description: + - Should the resource be present or absent. + choices: [present, absent] + default: present + +requirements: ["shade"] +''' + +EXAMPLES = ''' +# Create a security group +- os_security_group: cloud=mordred name=foo + description=security group for foo servers +''' + + +def _security_group(module, nova_client, action='create', **kwargs): + f = getattr(nova_client.security_groups, action) + try: + secgroup = f(**kwargs) + except Exception, e: + module.fail_json(msg='Failed to %s security group %s: %s' % + (action, module.params['name'], e.message)) + + +def main(): + + argument_spec = openstack_full_argument_spec( + name = dict(required=True), + description = dict(default=None), + state = dict(default='present', choices=['absent', 'present']), + ) + module_kwargs = openstack_module_kwargs() + module = AnsibleModule(argument_spec, **module_kwargs) + + try: + cloud = shade.openstack_cloud(**module.params) + nova_client = cloud.nova_client + changed = False + secgroup = cloud.get_security_group(module.params['name']) + + if module.params['state'] == 'present': + secgroup = cloud.get_security_group(module.params['name']) + if not secgroup: + _security_group(module, nova_client, action='create', + name=module.params['name'], + description=module.params['description']) + changed = True + + if secgroup and secgroup.description != module.params['description']: + _security_group(module, nova_client, action='update', + group=secgroup.id, + name=module.params['name'], + description=module.params['description']) + changed = True + + if module.params['state'] == 'absent': + if secgroup: + _security_group(module, nova_client, action='delete', + group=secgroup.id) + changed = True + + module.exit_json(changed=changed, 
id=module.params['name'], result="success") + + except shade.OpenStackCloudException as e: + module.fail_json(msg=e.message) + +# this is magic, see lib/ansible/module_common.py +from ansible.module_utils.basic import * +from ansible.module_utils.openstack import * +main() diff --git a/cloud/openstack/os_security_group_rule.py b/cloud/openstack/os_security_group_rule.py new file mode 100644 index 00000000000..849919e6394 --- /dev/null +++ b/cloud/openstack/os_security_group_rule.py @@ -0,0 +1,156 @@ +#!/usr/bin/python + +# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. +# Copyright (c) 2013, Benno Joy +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . 
+ +try: + import shade +except ImportError: + print("failed=True msg='shade is required for this module'") + + +DOCUMENTATION = ''' +--- +module: os_security_group_rule +short_description: Add/Delete rule from an existing security group +extends_documentation_fragment: openstack +version_added: "2.0" +description: + - Add or Remove rule from an existing security group +options: + security_group: + description: + - Name of the security group + required: true + protocol: + description: + - IP protocol + choices: ['tcp', 'udp', 'icmp'] + default: tcp + port_range_min: + description: + - Starting port + required: true + port_range_max: + description: + - Ending port + required: true + remote_ip_prefix: + description: + - Source IP address(es) in CIDR notation (exclusive with remote_group) + required: false + remote_group: + description: + - ID of Security group to link (exclusive with remote_ip_prefix) + required: false + state: + description: + - Should the resource be present or absent. + choices: [present, absent] + default: present + +requirements: ["shade"] +''' +# TODO(mordred): add ethertype and direction + +EXAMPLES = ''' +# Create a security group rule +- os_security_group_rule: + cloud: mordred + security_group: foo + protocol: tcp + port_range_min: 80 + port_range_max: 80 + remote_ip_prefix: 0.0.0.0/0 +''' + + +def _security_group_rule(module, nova_client, action='create', **kwargs): + f = getattr(nova_client.security_group_rules, action) + try: + secgroup = f(**kwargs) + except Exception, e: + module.fail_json(msg='Failed to %s security group rule: %s' % + (action, e.message)) + + +def _get_rule_from_group(module, secgroup): + for rule in secgroup.rules: + if (rule['ip_protocol'] == module.params['protocol'] and + rule['from_port'] == module.params['port_range_min'] and + rule['to_port'] == module.params['port_range_max'] and + rule['ip_range']['cidr'] == module.params['remote_ip_prefix']): + return rule + return None + +def main(): + + argument_spec = 
openstack_full_argument_spec( + security_group = dict(required=True), + protocol = dict(default='tcp', choices=['tcp', 'udp', 'icmp']), + port_range_min = dict(required=True), + port_range_max = dict(required=True), + remote_ip_prefix = dict(required=False, default=None), + # TODO(mordred): Make remote_group handle name and id + remote_group = dict(required=False, default=None), + state = dict(default='present', choices=['absent', 'present']), + ) + module_kwargs = openstack_module_kwargs( + mutually_exclusive=[ + ['remote_ip_prefix', 'remote_group'], + ], + ) + module = AnsibleModule(argument_spec, **module_kwargs) + + try: + cloud = shade.openstack_cloud(**module.params) + nova_client = cloud.nova_client + changed = False + + secgroup = cloud.get_security_group(module.params['security_group']) + + if module.params['state'] == 'present': + if not secgroup: + module.fail_json(msg='Could not find security group %s' % + module.params['security_group']) + + if not _get_rule_from_group(module, secgroup): + _security_group_rule(module, nova_client, 'create', + parent_group_id=secgroup.id, + ip_protocol=module.params['protocol'], + from_port=module.params['port_range_min'], + to_port=module.params['port_range_max'], + cidr=module.params['remote_ip'], + group_id=module.params['remote_group'], + changed = True + + + if module.params['state'] == 'absent' and secgroup: + rule = _get_rule_from_group(module, secgroup) + if secgroup and rule: + _security_group_rule(module, nova_client, 'delete', + rule=rule['id']) + changed = True + + module.exit_json(changed=changed, result="success") + + except shade.OpenStackCloudException as e: + module.fail_json(msg=e.message) + +# this is magic, see lib/ansible/module_common.py +from ansible.module_utils.basic import * +from ansible.module_utils.openstack import * +main() From 8c13ac894a6c717138f04e2e543cd1e6975825dd Mon Sep 17 00:00:00 2001 From: Tim Mahoney Date: Thu, 4 Jun 2015 14:40:05 -0400 Subject: [PATCH 167/464] Elasticache Subnet 
Group --- cloud/amazon/elasticache_subnet_group.py | 163 +++++++++++++++++++++++ 1 file changed, 163 insertions(+) create mode 100644 cloud/amazon/elasticache_subnet_group.py diff --git a/cloud/amazon/elasticache_subnet_group.py b/cloud/amazon/elasticache_subnet_group.py new file mode 100644 index 00000000000..5956fa37e83 --- /dev/null +++ b/cloud/amazon/elasticache_subnet_group.py @@ -0,0 +1,163 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: elasticache_subnet_group +version_added: "2.0" +short_description: manage Elasticache subnet groups +description: + - Creates, modifies, and deletes Elasticache subnet groups. This module has a dependency on python-boto >= 2.5. +options: + state: + description: + - Specifies whether the subnet should be present or absent. + required: true + default: present + aliases: [] + choices: [ 'present' , 'absent' ] + name: + description: + - Database subnet group identifier. + required: true + default: null + aliases: [] + description: + description: + - Elasticache subnet group description. Only set when a new group is added. + required: false + default: null + aliases: [] + subnets: + description: + - List of subnet IDs that make up the Elasticache subnet group. + required: false + default: null + aliases: [] + region: + description: + - The AWS region to use. 
If not specified then the value of the AWS_REGION or EC2_REGION environment variable, if any, is used. + required: true + default: null + aliases: ['aws_region', 'ec2_region'] +author: Tim Mahoney +extends_documentation_fragment: aws +''' + +EXAMPLES = ''' +# Add or change a subnet group +- elasticache_subnet_group + state: present + name: norwegian-blue + description: My Fancy Ex Parrot Subnet Group + subnets: + - subnet-aaaaaaaa + - subnet-bbbbbbbb + +# Remove a subnet group +- elasticache_subnet_group: + state: absent + name: norwegian-blue +''' + +try: + import boto + from boto.elasticache.layer1 import ElastiCacheConnection + from boto.regioninfo import RegionInfo + from boto.exception import BotoServerError + HAS_BOTO = True +except ImportError: + HAS_BOTO = False + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + state = dict(required=True, choices=['present', 'absent']), + name = dict(required=True), + description = dict(required=False), + subnets = dict(required=False, type='list'), + ) + ) + module = AnsibleModule(argument_spec=argument_spec) + + if not HAS_BOTO: + module.fail_json(msg='boto required for this module') + + state = module.params.get('state') + group_name = module.params.get('name').lower() + group_description = module.params.get('description') + group_subnets = module.params.get('subnets') or {} + + if state == 'present': + for required in ['name', 'description', 'subnets']: + if not module.params.get(required): + module.fail_json(msg = str("Parameter %s required for state='present'" % required)) + else: + for not_allowed in ['description', 'subnets']: + if module.params.get(not_allowed): + module.fail_json(msg = str("Parameter %s not allowed for state='absent'" % not_allowed)) + + # Retrieve any AWS settings from the environment. 
+ region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module) + + if not region: + module.fail_json(msg = str("Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set.")) + + + """Get an elasticache connection""" + try: + endpoint = "elasticache.%s.amazonaws.com" % region + connect_region = RegionInfo(name=region, endpoint=endpoint) + conn = ElastiCacheConnection(region=connect_region, **aws_connect_kwargs) + except boto.exception.NoAuthHandlerFound, e: + module.fail_json(msg=e.message) + + try: + changed = False + exists = False + + try: + matching_groups = conn.describe_cache_subnet_groups(group_name, max_records=100) + exists = len(matching_groups) > 0 + except BotoServerError, e: + if e.error_code != 'CacheSubnetGroupNotFoundFault': + module.fail_json(msg = e.error_message) + + if state == 'absent': + if exists: + conn.delete_cache_subnet_group(group_name) + changed = True + else: + if not exists: + new_group = conn.create_cache_subnet_group(group_name, cache_subnet_group_description=group_description, subnet_ids=group_subnets) + changed = True + else: + changed_group = conn.modify_cache_subnet_group(group_name, cache_subnet_group_description=group_description, subnet_ids=group_subnets) + changed = True + + except BotoServerError, e: + if e.error_message != 'No modifications were requested.': + module.fail_json(msg = e.error_message) + else: + changed = False + + module.exit_json(changed=changed) + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import * + +main() \ No newline at end of file From 6b9e9fe05dbfb48862a3d999ea3e2ddd93f820aa Mon Sep 17 00:00:00 2001 From: Benno Joy Date: Fri, 5 Jun 2015 12:31:59 +0530 Subject: [PATCH 168/464] fix for 11177 where module readds rule even if the rules exists --- cloud/amazon/ec2_group.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/cloud/amazon/ec2_group.py b/cloud/amazon/ec2_group.py index 
b85fde9ead3..2109c7ba33b 100644 --- a/cloud/amazon/ec2_group.py +++ b/cloud/amazon/ec2_group.py @@ -131,6 +131,11 @@ def make_rule_key(prefix, rule, group_id, cidr_ip): """Creates a unique key for an individual group rule""" if isinstance(rule, dict): proto, from_port, to_port = [rule.get(x, None) for x in ('proto', 'from_port', 'to_port')] + #fix for 11177 + if proto not in ['icmp', 'tcp', 'udp'] and from_port == -1 and to_port == -1: + from_port = 'none' + to_port = 'none' + else: # isinstance boto.ec2.securitygroup.IPPermissions proto, from_port, to_port = [getattr(rule, x, None) for x in ('ip_protocol', 'from_port', 'to_port')] From 06aeabc5fb2739d2b07612246b517760ca676e11 Mon Sep 17 00:00:00 2001 From: Constantin Date: Fri, 5 Jun 2015 14:35:26 +0100 Subject: [PATCH 169/464] Added additional stat output values Included in the output as well: - file extension - file attributes - file owner - creation, last access and last write timestamps (in UNIX format) --- windows/win_stat.ps1 | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/windows/win_stat.ps1 b/windows/win_stat.ps1 index 10101a62b30..51c9c827093 100644 --- a/windows/win_stat.ps1 +++ b/windows/win_stat.ps1 @@ -19,6 +19,11 @@ $params = Parse-Args $args; +function Date_To_Timestamp($start_date, $end_date) +{ + Write-Output (New-TimeSpan -Start $start_date -End $end_date).TotalSeconds +} + $path = Get-Attr $params "path" $FALSE; If ($path -eq $FALSE) { @@ -36,6 +41,7 @@ If (Test-Path $path) { Set-Attr $result.stat "exists" $TRUE; $info = Get-Item $path; + $epoch_date = Get-Date -Date "01/01/1970" If ($info.Directory) # Only files have the .Directory attribute. 
{ Set-Attr $result.stat "isdir" $FALSE; @@ -45,6 +51,12 @@ If (Test-Path $path) { Set-Attr $result.stat "isdir" $TRUE; } + Set-Attr $result.stat "extension" $info.Extension; + Set-Attr $result.stat "attributes" $info.Attributes.ToString(); + Set-Attr $result.stat "owner" $info.GetAccessControl().Owner; + Set-Attr $result.stat "creationtime" (Date_To_Timestamp $epoch_date $info.CreationTime); + Set-Attr $result.stat "lastaccesstime" (Date_To_Timestamp $epoch_date $info.LastAccessTime); + Set-Attr $result.stat "lastwritetime" (Date_To_Timestamp $epoch_date $info.LastWriteTime); } Else { From 182e7c7fc6626f1dbf155929879943a217124088 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 5 Jun 2015 10:19:41 -0400 Subject: [PATCH 170/464] made non exec --- cloud/amazon/ec2_vpc.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) mode change 100755 => 100644 cloud/amazon/ec2_vpc.py diff --git a/cloud/amazon/ec2_vpc.py b/cloud/amazon/ec2_vpc.py old mode 100755 new mode 100644 From 837bd04f951bafedb23e5d057d6b961c6d7d6172 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 5 Jun 2015 10:25:25 -0400 Subject: [PATCH 171/464] minor doc fixes --- cloud/amazon/elasticache_subnet_group.py | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/cloud/amazon/elasticache_subnet_group.py b/cloud/amazon/elasticache_subnet_group.py index 5956fa37e83..4ea7e8aba16 100644 --- a/cloud/amazon/elasticache_subnet_group.py +++ b/cloud/amazon/elasticache_subnet_group.py @@ -27,33 +27,27 @@ options: - Specifies whether the subnet should be present or absent. required: true default: present - aliases: [] choices: [ 'present' , 'absent' ] name: description: - Database subnet group identifier. required: true - default: null - aliases: [] description: description: - Elasticache subnet group description. Only set when a new group is added. required: false default: null - aliases: [] subnets: description: - List of subnet IDs that make up the Elasticache subnet group. 
required: false default: null - aliases: [] region: description: - The AWS region to use. If not specified then the value of the AWS_REGION or EC2_REGION environment variable, if any, is used. required: true - default: null aliases: ['aws_region', 'ec2_region'] -author: Tim Mahoney +author: "Tim Mahoney (@timmahoney)" extends_documentation_fragment: aws ''' @@ -160,4 +154,4 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.ec2 import * -main() \ No newline at end of file +main() From 1517ae8ab27d2493a51fb1eff7cf0c30b5c54f0a Mon Sep 17 00:00:00 2001 From: "Ching Yi, Chan" Date: Fri, 5 Jun 2015 23:29:11 +0800 Subject: [PATCH 172/464] Refactoring for easier to read --- cloud/amazon/s3.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/cloud/amazon/s3.py b/cloud/amazon/s3.py index 545955e90cd..d08d1b6a46c 100644 --- a/cloud/amazon/s3.py +++ b/cloud/amazon/s3.py @@ -299,9 +299,7 @@ def is_walrus(s3_url): def get_md5_digest(local_file): md5 = hashlib.md5() with open(local_file, 'rb') as f: - while True: - data = f.read(1024 ** 2) - if not data: break + for data in f.read(1024 ** 2): md5.update(data) return md5.hexdigest() From f5ff5ba348b1158b569d29125676553a6aa7d49a Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 5 Jun 2015 12:01:10 -0400 Subject: [PATCH 173/464] reverting pep changes that were breaking documentation --- cloud/amazon/ec2_group.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/cloud/amazon/ec2_group.py b/cloud/amazon/ec2_group.py index b85fde9ead3..b4cc023cd64 100644 --- a/cloud/amazon/ec2_group.py +++ b/cloud/amazon/ec2_group.py @@ -24,15 +24,11 @@ options: required: false rules: description: - - List of firewall inbound rules to enforce in this group (see''' -''' example). If none are supplied, a default all-out rule is assumed.''' -''' If an empty list is supplied, no inbound rules will be enabled. 
+ - List of firewall inbound rules to enforce in this group (see example). If none are supplied, a default all-out rule is assumed. If an empty list is supplied, no inbound rules will be enabled. required: false rules_egress: description: - - List of firewall outbound rules to enforce in this group (see''' -''' example). If none are supplied, a default all-out rule is assumed.''' -''' If an empty list is supplied, no outbound rules will be enabled. + - List of firewall outbound rules to enforce in this group (see example). If none are supplied, a default all-out rule is assumed. If an empty list is supplied, no outbound rules will be enabled. required: false version_added: "1.6" region: From 801a5e8ed07b01182657e1d091674121bd80c7c6 Mon Sep 17 00:00:00 2001 From: Jonathan Davila Date: Tue, 17 Feb 2015 15:22:40 -0500 Subject: [PATCH 174/464] iam module with fixes iam module. fix policy issue. bugfix allow for modifying path without declaring new_path --- cloud/amazon/iam.py | 726 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 726 insertions(+) create mode 100644 cloud/amazon/iam.py diff --git a/cloud/amazon/iam.py b/cloud/amazon/iam.py new file mode 100644 index 00000000000..05c74c5b716 --- /dev/null +++ b/cloud/amazon/iam.py @@ -0,0 +1,726 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +DOCUMENTATION = ''' +--- +module: iam +short_description: Manage IAM users, groups, roles and keys +description: + - Allows for the management of IAM users, groups, roles and access keys. +version_added: "2.0" +options: + iam_type: + description: + - Type of IAM resource + required: true + default: null + choices: [ "user", "group", "role"] + aliases: [] + name: + description: + - Name of IAM resource to create or identify + required: true + aliases: [] + new_name: + description: + - When state is update, will replace name with new_name on IAM resource + required: false + aliases: [] + new_path: + description: + - When state is update, will replace the path with new_path on the IAM resource + required: false + aliases: [] + state: + description: + - Whether to create, delete or update the IAM resource. Note, roles cannot be updated. + required: true + default: null + choices: [ "present", "absent", "update" ] + aliases: [] + path: + description: + - When creating or updating, specify the desired path of the resource. If state is present, it will replace the current path to match what is passed in when they do not match. + required: false + default: "/" + aliases: [] + access_key_state: + description: + - When type is user, it creates, removes, deactivates or activates a user's access key(s). Note that actions apply only to keys specified. + required: false + default: null + choices: [ "create", "remove", "active", "inactive"] + aliases: [] + key_count: + description: + - When access_key_state is create it will ensure this quantity of keys are present. Defaults to 1. + required: false + default: '1' + aliases: [] + access_key_ids: + description: + - A list of the keys that you want impacted by the access_key_state paramter. + groups: + description: + - A list of groups the user should belong to. When update, will gracefully remove groups not listed. 
+ required: false + default: null + aliases: [] + password: + description: + - When type is user and state is present, define the users login password. Also works with update. Note that always returns changed. + required: false + default: null + aliases: [] + update_password: + required: false + default: always + choices: ['always', 'on_create'] + description: + - C(always) will update passwords if they differ. C(on_create) will only set the password for newly created users. + aws_secret_key: + description: + - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used. + required: false + default: null + aliases: [ 'ec2_secret_key', 'secret_key' ] + aws_access_key: + description: + - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used. + required: false + default: null + aliases: [ 'ec2_access_key', 'access_key' ] + + +requirements: [ "boto" ] +notes: + - 'Currently boto does not support the removal of Managed Policies, the module will error out if your user/group/role has managed policies when you try to do state=absent. They will need to be removed manually.' +author: Jonathan I. Davila and Paul Seiffert +extends_documentation_fragment: aws +''' + +EXAMPLES = ''' +# Basic user creation example +tasks: +- name: Create two new IAM users with API keys + iam: + iam_type: user + name: "{{ item }}" + state: present + password: "{{ temp_pass }}" + access_key_state: create + with_items: + - jcleese + - mpython + +# Advanced example, create two new groups and add the pre-existing user +# jdavila to both groups. 
+task: +- name: Create Two Groups, Mario and Luigi + iam: + iam_type: group + name: "{{ item }}" + state: present + with_items: + - Mario + - Luigi + register: new_groups + +- name: + iam: + iam_type: user + name: jdavila + state: update + group: "{{ item.created_group.group_name }}" + with_items: new_groups.results + +''' + +import json +import itertools +import sys +try: + import boto + import boto.iam + HAS_BOTO = True +except ImportError: + HAS_BOTO = False + +def boto_exception(err): + '''generic error message handler''' + if hasattr(err, 'error_message'): + error = err.error_message + elif hasattr(err, 'message'): + error = err.message + else: + error = '%s: %s' % (Exception, err) + + return error + + +def create_user(module, iam, name, pwd, path, key_state, key_count): + key_qty = 0 + keys = [] + try: + user_meta = iam.create_user( + name, path).create_user_response.create_user_result.user + changed = True + if pwd is not None: + pwd = iam.create_login_profile(name, pwd) + if key_state in ['create']: + if key_count: + while key_count > key_qty: + keys.append(iam.create_access_key( + user_name=name).create_access_key_response.\ + create_access_key_result.\ + access_key) + key_qty += 1 + else: + keys = None + except boto.exception.BotoServerError, err: + module.fail_json(changed=False, msg=str(err)) + else: + user_info = dict(created_user=user_meta, password=pwd, access_keys=keys) + return (user_info, changed) + + +def delete_user(module, iam, name): + try: + current_keys = [ck['access_key_id'] for ck in + iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata] + for key in current_keys: + iam.delete_access_key(key, name) + del_meta = iam.delete_user(name).delete_user_response + except boto.exception.BotoServerError, err: + error_msg = boto_exception(err) + if ('must detach all policies first') in error_msg: + for policy in iam.get_all_user_policies(name).list_user_policies_result.policy_names: + iam.delete_user_policy(name, policy) + try: + 
del_meta = iam.delete_user(name) + except boto.exception.BotoServerError, err: + error_msg = boto_exception(err) + if ('must detach all policies first') in error_msg: + module.fail_json(changed=changed, msg="All inline polices have been removed. Though it appears" + "that %s has Managed Polices. This is not " + "currently supported by boto. Please detach the polices " + "through the console and try again." % name) + else: + module.fail_json(changed=changed, msg=str(err)) + else: + changed = True + return del_meta, name, changed + else: + changed = True + return del_meta, name, changed + + +def update_user(module, iam, name, new_name, new_path, key_state, key_count, keys, pwd, updated): + changed = False + name_change = False + if updated and new_name: + name = new_name + try: + current_keys, status = \ + [ck['access_key_id'] for ck in + iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata],\ + [ck['status'] for ck in + iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata] + key_qty = len(current_keys) + except boto.exception.BotoServerError, err: + error_msg = boto_exception(err) + if 'cannot be found' in error_msg and updated: + current_keys, status = \ + [ck['access_key_id'] for ck in + iam.get_all_access_keys(new_name).list_access_keys_result.access_key_metadata],\ + [ck['status'] for ck in + iam.get_all_access_keys(new_name).list_access_keys_result.access_key_metadata] + name = new_name + else: + module.fail_json(changed=False, msg=str(err)) + + updated_key_list = {} + + if new_name or new_path: + c_path = iam.get_user(name).get_user_result.user['path'] + if (name != new_name) or (c_path != new_path): + changed = True + try: + if not updated: + user = iam.update_user( + name, new_user_name=new_name, new_path=new_path).update_user_response.response_metadata + else: + user = iam.update_user( + name, new_path=new_path).update_user_response.response_metadata + user['updates'] = dict( + old_username=name, 
new_username=new_name, old_path=c_path, new_path=new_path) + except boto.exception.BotoServerError, err: + error_msg = boto_exception(err) + module.fail_json(changed=False, msg=str(err)) + else: + if not updated: + name_change = True + + if pwd: + try: + iam.update_login_profile(name, pwd) + changed = True + except boto.exception.BotoServerError: + try: + iam.create_login_profile(name, pwd) + changed = True + except boto.exception.BotoServerError, err: + error_msg = boto_exception(str(err)) + if 'Password does not conform to the account password policy' in error_msg: + module.fail_json(changed=False, msg="Passsword doesn't conform to policy") + else: + module.fail_json(msg=error_msg) + else: + try: + iam.delete_login_profile(name) + changed = True + except boto.exception.BotoServerError: + pass + + if key_state == 'create': + try: + while key_count > key_qty: + new_key = iam.create_access_key( + user_name=name).create_access_key_response.create_access_key_result.access_key + key_qty += 1 + changed = True + + except boto.exception.BotoServerError, err: + module.fail_json(changed=False, msg=str(err)) + + if keys and key_state: + for access_key in keys: + if access_key in current_keys: + for current_key, current_key_state in zip(current_keys, status): + if key_state != current_key_state.lower(): + try: + iam.update_access_key( + access_key, key_state.capitalize(), user_name=name) + except boto.exception.BotoServerError, err: + module.fail_json(changed=False, msg=str(err)) + else: + changed = True + + if key_state == 'remove': + try: + iam.delete_access_key(access_key, user_name=name) + except boto.exception.BotoServerError, err: + module.fail_json(changed=False, msg=str(err)) + else: + changed = True + + try: + final_keys, final_key_status = \ + [ck['access_key_id'] for ck in + iam.get_all_access_keys(name). + list_access_keys_result. + access_key_metadata],\ + [ck['status'] for ck in + iam.get_all_access_keys(name). + list_access_keys_result. 
+ access_key_metadata] + except boto.exception.BotoServerError, err: + module.fail_json(changed=changed, msg=str(err)) + + for fk, fks in zip(final_keys, final_key_status): + updated_key_list.update({fk: fks}) + + return name_change, updated_key_list, changed + + +def set_users_groups(module, iam, name, groups, updated=None, +new_name=None): + """ Sets groups for a user, will purge groups not explictly passed, while + retaining pre-existing groups that also are in the new list. + """ + changed = False + + if updated: + name = new_name + + try: + orig_users_groups = [og['group_name'] for og in iam.get_groups_for_user( + name).list_groups_for_user_result.groups] + remove_groups = [ + rg for rg in frozenset(orig_users_groups).difference(groups)] + new_groups = [ + ng for ng in frozenset(groups).difference(orig_users_groups)] + except boto.exception.BotoServerError, err: + module.fail_json(changed=changed, msg=str(err)) + else: + if len(orig_users_groups) > 0: + for new in new_groups: + iam.add_user_to_group(new, name) + for rm in remove_groups: + iam.remove_user_from_group(rm, name) + else: + for group in groups: + try: + iam.add_user_to_group(group, name) + except boto.exception.BotoServerError, err: + error_msg = boto_exception(err) + if ('The group with name %s cannot be found.' 
% group) in error_msg: + module.fail_json(changed=False, msg="Group %s doesn't exist" % group) + + + if len(remove_groups) > 0 or len(new_groups) > 0: + changed = True + + return (groups, changed) + + +def create_group(module, iam, name, path): + changed = False + try: + iam.create_group( + name, path).create_group_response.create_group_result.group + except boto.exception.BotoServerError, err: + module.fail_json(changed=changed, msg=str(err)) + else: + changed = True + return name, changed + + +def delete_group(module, iam, name): + changed = False + try: + iam.delete_group(name) + except boto.exception.BotoServerError, err: + error_msg = boto_exception(err) + if ('must detach all policies first') in error_msg: + for policy in iam.get_all_group_policies(name).list_group_policies_result.policy_names: + iam.delete_group_policy(name, policy) + try: + iam.delete_group(name) + except boto.exception.BotoServerError, err: + error_msg = boto_exception(err) + if ('must detach all policies first') in error_msg: + module.fail_json(changed=changed, msg="All inline polices have been removed. Though it appears" + "that %s has Managed Polices. This is not " + "currently supported by boto. Please detach the polices " + "through the console and try again." 
% name) + else: + module.fail_json(changed=changed, msg=str(err)) + else: + changed = True + else: + changed = True + return changed, name + + +def update_group(module, iam, name, new_name, new_path): + changed = False + try: + current_group_path = iam.get_group( + name).get_group_response.get_group_result.group['path'] + if new_path: + if current_group_path != new_path: + iam.update_group(name, new_path=new_path) + changed = True + if new_name: + if name != new_name: + iam.update_group(name, new_group_name=new_name, new_path=new_path) + changed = True + name = new_name + except boto.exception.BotoServerError, err: + module.fail_json(changed=changed, msg=str(err)) + + return changed, name, new_path, current_group_path + + +def create_role(module, iam, name, path, role_list, prof_list): + changed = False + try: + if name not in role_list: + changed = True + iam.create_role( + name, path=path).create_role_response.create_role_result.role.role_name + + if name not in prof_list: + iam.create_instance_profile(name, path=path) + iam.add_role_to_instance_profile(name, name) + except boto.exception.BotoServerError, err: + module.fail_json(changed=changed, msg=str(err)) + else: + updated_role_list = [rl['role_name'] for rl in iam.list_roles().list_roles_response. + list_roles_result.roles] + return changed, updated_role_list + + +def delete_role(module, iam, name, role_list, prof_list): + changed = False + try: + if name in role_list: + cur_ins_prof = [rp['instance_profile_name'] for rp in + iam.list_instance_profiles_for_role(name). + list_instance_profiles_for_role_result. 
+ instance_profiles] + for profile in cur_ins_prof: + iam.remove_role_from_instance_profile(profile, name) + try: + iam.delete_role(name) + except boto.exception.BotoServerError, err: + error_msg = boto_exception(err) + if ('must detach all policies first') in error_msg: + for policy in iam.list_role_policies(name).list_role_policies_result.policy_names: + iam.delete_role_policy(name, policy) + try: + iam.delete_role(name) + except boto.exception.BotoServerError, err: + error_msg = boto_exception(err) + if ('must detach all policies first') in error_msg: + module.fail_json(changed=changed, msg="All inline polices have been removed. Though it appears" + "that %s has Managed Polices. This is not " + "currently supported by boto. Please detach the polices " + "through the console and try again." % name) + else: + module.fail_json(changed=changed, msg=str(err)) + else: + changed = True + + else: + changed = True + + for prof in prof_list: + if name == prof: + iam.delete_instance_profile(name) + except boto.exception.BotoServerError, err: + module.fail_json(changed=changed, msg=str(err)) + else: + updated_role_list = [rl['role_name'] for rl in iam.list_roles().list_roles_response. 
+ list_roles_result.roles] + return changed, updated_role_list + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + iam_type=dict( + default=None, required=True, choices=['user', 'group', 'role']), + groups=dict(type='list', default=None, required=False), + state=dict( + default=None, required=True, choices=['present', 'absent', 'update']), + password=dict(default=None, required=False), + update_password=dict(default='always', required=False, choices=['always', 'on_create']), + access_key_state=dict(default=None, required=False, choices=[ + 'active', 'inactive', 'create', 'remove', + 'Active', 'Inactive', 'Create', 'Remove']), + access_key_ids=dict(type='list', default=None, required=False), + key_count=dict(type='int', default=1, required=False), + name=dict(default=None, required=False), + new_name=dict(default=None, required=False), + path=dict(default='/', required=False), + new_path=dict(default=None, required=False) + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[], + ) + + if not HAS_BOTO: + module.fail_json(msg='This module requires boto, please install it') + + state = module.params.get('state').lower() + iam_type = module.params.get('iam_type').lower() + groups = module.params.get('groups') + name = module.params.get('name') + new_name = module.params.get('new_name') + password = module.params.get('password') + update_pw = module.params.get('update_password') + path = module.params.get('path') + new_path = module.params.get('new_path') + key_count = module.params.get('key_count') + key_state = module.params.get('access_key_state') + if key_state: + key_state = key_state.lower() + if any([n in key_state for n in ['active', 'inactive']]) and not key_ids: + module.fail_json(changed=False, msg="At least one access key has to be defined in order" + " to use 'active' or 'inactive'") + key_ids = module.params.get('access_key_ids') + + if iam_type == 'user' and module.params.get('password') 
is not None: + pwd = module.params.get('password') + elif iam_type != 'user' and module.params.get('password') is not None: + module.fail_json(msg="a password is being specified when the iam_type " + "is not user. Check parameters") + else: + pwd = None + + if iam_type != 'user' and (module.params.get('access_key_state') is not None or + module.params.get('access_key_id') is not None): + module.fail_json(msg="the IAM type must be user, when IAM access keys " + "are being modified. Check parameters") + + if iam_type == 'role' and state == 'update': + module.fail_json(changed=False, msg="iam_type: role, cannot currently be updated, " + "please specificy present or absent") + + ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module) + + try: + iam = boto.iam.connection.IAMConnection( + aws_access_key_id=aws_access_key, + aws_secret_access_key=aws_secret_key, + ) + except boto.exception.NoAuthHandlerFound, e: + module.fail_json(msg=str(e)) + + result = {} + changed = False + + orig_group_list = [gl['group_name'] for gl in iam.get_all_groups(). + list_groups_result. + groups] + orig_user_list = [ul['user_name'] for ul in iam.get_all_users(). + list_users_result. + users] + orig_role_list = [rl['role_name'] for rl in iam.list_roles().list_roles_response. + list_roles_result. + roles] + orig_prof_list = [ap['instance_profile_name'] for ap in iam.list_instance_profiles(). + list_instance_profiles_response. + list_instance_profiles_result. 
+ instance_profiles] + + if iam_type == 'user': + been_updated = False + user_groups = None + user_exists = any([n in [name, new_name] for n in orig_user_list]) + if user_exists: + current_path = iam.get_user(name).get_user_result.user['path'] + if not new_path and current_path != path: + new_path = path + path = current_path + + if state == 'present' and not user_exists and not new_name: + (meta, changed) = create_user( + module, iam, name, password, path, key_state, key_count) + keys = iam.get_all_access_keys(name).list_access_keys_result.\ + access_key_metadata + if groups: + (user_groups, changed) = set_users_groups( + module, iam, name, groups, been_updated, new_name) + module.exit_json( + user_meta=meta, groups=user_groups, keys=keys, changed=changed) + + elif state in ['present', 'update'] and user_exists: + if update_pw == 'on_create': + password = None + if name not in orig_user_list and new_name in orig_user_list: + been_updated = True + name_change, key_list, user_changed = update_user( + module, iam, name, new_name, new_path, key_state, key_count, key_ids, password, been_updated) + if name_change and new_name: + orig_name = name + name = new_name + if groups: + user_groups, groups_changed = set_users_groups( + module, iam, name, groups, been_updated, new_name) + if groups_changed == user_changed: + changed = groups_changed + else: + changed = True + else: + changed = user_changed + if new_name and new_path: + module.exit_json(changed=changed, groups=user_groups, old_user_name=orig_name, + new_user_name=new_name, old_path=path, new_path=new_path, keys=key_list) + elif new_name and not new_path and not been_updated: + module.exit_json( + changed=changed, groups=user_groups, old_user_name=orig_name, new_user_name=new_name, keys=key_list) + elif new_name and not new_path and been_updated: + module.exit_json( + changed=changed, groups=user_groups, user_name=new_name, keys=key_list, key_state=key_state) + elif not new_name and new_path: + module.exit_json( + 
changed=changed, groups=user_groups, user_name=name, old_path=path, new_path=new_path, keys=key_list) + else: + module.exit_json( + changed=changed, groups=user_groups, user_name=name, keys=key_list) + elif state == 'update' and not user_exists: + module.fail_json( + msg="The user %s does not exit. No update made." % name) + elif state == 'absent': + if name in orig_user_list: + set_users_groups(module, iam, name, '') + del_meta, name, changed = delete_user(module, iam, name) + module.exit_json( + deletion_meta=del_meta, deleted_user=name, changed=changed) + else: + module.exit_json( + changed=False, msg="User %s is already absent from your AWS IAM users" % name) + + elif iam_type == 'group': + group_exists = name in orig_group_list + + if state == 'present' and not group_exists: + new_group, changed = create_group(iam, name, path) + module.exit_json(changed=changed, group_name=new_group) + elif state in ['present', 'update'] and group_exists: + changed, updated_name, updated_path, cur_path = update_group( + iam, name, new_name, new_path) + + if new_path and new_name: + module.exit_json(changed=changed, old_group_name=name, + new_group_name=updated_name, old_path=cur_path, + new_group_path=updated_path) + + if new_path and not new_name: + module.exit_json(changed=changed, group_name=name, + old_path=cur_path, + new_group_path=updated_path) + + if not new_path and new_name: + module.exit_json(changed=changed, old_group_name=name, + new_group_name=updated_name, group_path=cur_path) + + if not new_path and not new_name: + module.exit_json( + changed=changed, group_name=name, group_path=cur_path) + elif state == 'update' and not group_exists: + module.fail_json( + changed=changed, msg="Update Failed. Group %s doesn't seem to exit!" 
% name) + elif state == 'absent': + if name in orig_group_list: + removed_group, changed = delete_group(iam, name) + module.exit_json(changed=changed, delete_group=removed_group) + else: + module.exit_json(changed=changed, msg="Group already absent") + + elif iam_type == 'role': + role_list = [] + if state == 'present': + changed, role_list = create_role( + module, iam, name, path, orig_role_list, orig_prof_list) + elif state == 'absent': + changed, role_list = delete_role( + module, iam, name, orig_role_list, orig_prof_list) + elif state == 'update': + module.fail_json( + changed=False, msg='Role update not currently supported by boto.') + module.exit_json(changed=changed, roles=role_list) + +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import * + +main() From fc255fbd21cf75d23b1a9b40f01131e5a0fd3ee7 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 5 Jun 2015 14:09:02 -0400 Subject: [PATCH 175/464] minor doc fixes --- cloud/amazon/iam.py | 17 +++-------------- 1 file changed, 3 insertions(+), 14 deletions(-) diff --git a/cloud/amazon/iam.py b/cloud/amazon/iam.py index 05c74c5b716..a4111ee5477 100644 --- a/cloud/amazon/iam.py +++ b/cloud/amazon/iam.py @@ -28,48 +28,42 @@ options: required: true default: null choices: [ "user", "group", "role"] - aliases: [] name: description: - Name of IAM resource to create or identify required: true - aliases: [] new_name: description: - When state is update, will replace name with new_name on IAM resource required: false - aliases: [] + default: null new_path: description: - When state is update, will replace the path with new_path on the IAM resource required: false - aliases: [] + default: null state: description: - Whether to create, delete or update the IAM resource. Note, roles cannot be updated. required: true default: null choices: [ "present", "absent", "update" ] - aliases: [] path: description: - When creating or updating, specify the desired path of the resource. 
If state is present, it will replace the current path to match what is passed in when they do not match. required: false default: "/" - aliases: [] access_key_state: description: - When type is user, it creates, removes, deactivates or activates a user's access key(s). Note that actions apply only to keys specified. required: false default: null choices: [ "create", "remove", "active", "inactive"] - aliases: [] key_count: description: - When access_key_state is create it will ensure this quantity of keys are present. Defaults to 1. required: false default: '1' - aliases: [] access_key_ids: description: - A list of the keys that you want impacted by the access_key_state paramter. @@ -78,13 +72,11 @@ options: - A list of groups the user should belong to. When update, will gracefully remove groups not listed. required: false default: null - aliases: [] password: description: - When type is user and state is present, define the users login password. Also works with update. Note that always returns changed. required: false default: null - aliases: [] update_password: required: false default: always @@ -103,12 +95,9 @@ options: required: false default: null aliases: [ 'ec2_access_key', 'access_key' ] - - -requirements: [ "boto" ] notes: - 'Currently boto does not support the removal of Managed Policies, the module will error out if your user/group/role has managed policies when you try to do state=absent. They will need to be removed manually.' -author: Jonathan I. Davila and Paul Seiffert +author: Jonathan I. 
Davila and Paul Seiffert (@defionscode) extends_documentation_fragment: aws ''' From 627933049c25b71e3b5d5443b0c41998a72b99ce Mon Sep 17 00:00:00 2001 From: az7arul Date: Sat, 6 Jun 2015 01:54:00 +0600 Subject: [PATCH 176/464] add `include_doc` option --- packaging/language/gem.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/packaging/language/gem.py b/packaging/language/gem.py index 54d06da7466..8929dbbfd28 100644 --- a/packaging/language/gem.py +++ b/packaging/language/gem.py @@ -73,6 +73,12 @@ options: required: false default: "no" version_added: "1.6" + include_doc: + description: + - Install with or without docs. + required: false + default: "no" + version_added: "2.0" build_flags: description: - Allow adding build flags for gem compilation @@ -187,8 +193,9 @@ def install(module): cmd.append('--no-user-install') if module.params['pre_release']: cmd.append('--pre') - cmd.append('--no-rdoc') - cmd.append('--no-ri') + if not module.params['include_doc']: + cmd.append('--no-rdoc') + cmd.append('--no-ri') cmd.append(module.params['gem_source']) if module.params['build_flags']: cmd.extend([ '--', module.params['build_flags'] ]) @@ -206,6 +213,7 @@ def main(): state = dict(required=False, default='present', choices=['present','absent','latest'], type='str'), user_install = dict(required=False, default=True, type='bool'), pre_release = dict(required=False, default=False, type='bool'), + include_doc = dict(required=False, default=False, type-'bool'), version = dict(required=False, type='str'), build_flags = dict(required=False, type='str'), ), From 0bdf9f4d27262711430dc9ed0858636def034768 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Tue, 31 Mar 2015 16:37:07 -0400 Subject: [PATCH 177/464] Add OpenStack Image module Also deprecate old glance_image module --- .../{glance_image.py => _glance_image.py} | 1 + cloud/openstack/os_image.py | 193 ++++++++++++++++++ 2 files changed, 194 insertions(+) rename cloud/openstack/{glance_image.py 
=> _glance_image.py} (99%) create mode 100644 cloud/openstack/os_image.py diff --git a/cloud/openstack/glance_image.py b/cloud/openstack/_glance_image.py similarity index 99% rename from cloud/openstack/glance_image.py rename to cloud/openstack/_glance_image.py index 97b89f03484..0f2de791b38 100644 --- a/cloud/openstack/glance_image.py +++ b/cloud/openstack/_glance_image.py @@ -20,6 +20,7 @@ DOCUMENTATION = ''' --- module: glance_image version_added: "1.2" +deprecated: Deprecated in 1.10. Use os_image instead short_description: Add/Delete images from glance description: - Add or Remove images from the glance repository. diff --git a/cloud/openstack/os_image.py b/cloud/openstack/os_image.py new file mode 100644 index 00000000000..bdb8755e84c --- /dev/null +++ b/cloud/openstack/os_image.py @@ -0,0 +1,193 @@ +#!/usr/bin/python + +# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. +# Copyright (c) 2013, Benno Joy +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . 
+ +#TODO(mordred): we need to support "location"(v1) and "locations"(v2) +try: + import shade + HAS_SHADE = True +except ImportError: + HAS_SHADE = False + + +DOCUMENTATION = ''' +--- +module: os_image +short_description: Add/Delete images from OpenStack Cloud +extends_documentation_fragment: openstack +version_added: "1.10" +description: + - Add or Remove images from the OpenStack Image Repository +options: + name: + description: + - Name that has to be given to the image + required: true + default: None + disk_format: + description: + - The format of the disk that is getting uploaded + required: false + default: qcow2 + container_format: + description: + - The format of the container + required: false + default: bare + owner: + description: + - The owner of the image + required: false + default: None + min_disk: + description: + - The minimum disk space required to deploy this image + required: false + default: None + min_ram: + description: + - The minimum ram required to deploy this image + required: false + default: None + is_public: + description: + - Whether the image can be accessed publicly. Note that publicizing an image requires admin role by default. + required: false + default: 'yes' + filename: + description: + - The path to the file which has to be uploaded + required: false + default: None + ramdisk: + descrption: + - The name of an existing ramdisk image that will be associated with this image + required: false + default: None + kernel: + descrption: + - The name of an existing kernel image that will be associated with this image + required: false + default: None + properties: + description: + - Additional properties to be associated with this image +requirements: ["shade"] + state: + description: + - Should the resource be present or absent. 
+ choices: [present, absent] + default: present +''' + +EXAMPLES = ''' +# Upload an image from a local file named cirros-0.3.0-x86_64-disk.img +- os_image: + auth: + username: admin + password: passme + project_name: admin + name: cirros + container_format: bare + disk_format: qcow2 + state: present + filename: cirros-0.3.0-x86_64-disk.img + kernel: cirros-vmlinuz + ramdisk: cirros-initrd + properties: + cpu_arch: x86_64 + distro: ubuntu +''' + +import time + + +def _glance_delete_image(module, params, client): + try: + for image in client.images.list(): + if image.name == params['name']: + client.images.delete(image) + except Exception, e: + module.fail_json(msg="Error in deleting image: %s" % e.message) + module.exit_json(changed=True, result="Deleted") + + +def main(): + + argument_spec = openstack_full_argument_spec( + name = dict(required=True), + disk_format = dict(default='qcow2', choices=['ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw', 'qcow2', 'vdi', 'iso']), + container_format = dict(default='bare', choices=['ami', 'aki', 'ari', 'bare', 'ovf', 'ova']), + owner = dict(default=None), + min_disk = dict(default=None), + min_ram = dict(default=None), + is_public = dict(default=False), + filename = dict(default=None), + ramdisk = dict(default=None), + kernel = dict(default=None), + properties = dict(default={}), + state = dict(default='present', choices=['absent', 'present']), + ) + module_kwargs = openstack_module_kwargs() + module = AnsibleModule(argument_spec, **module_kwargs) + + if not HAS_SHADE: + module.fail_json(msg='shade is required for this module') + + try: + cloud = shade.openstack_cloud(**module.params) + + changed = False + image = cloud.get_image(name_or_id=module.params['name']) + + if module.params['state'] == 'present': + if not image: + result = cloud.create_image( + name=module.params['name'], + filename=module.params['filename'], + disk_format=module.params['disk_format'], + container_format=module.params['container_format'], + 
wait=module.params['wait'], + timeout=module.params['timeout'] + ) + changed = True + if not module.params['wait']: + module.exit_json(changed=changed, result=result) + image = cloud.get_image(name_or_id=result['id']) + + cloud.update_image_properties( + image=image, + kernel=module.params['kernel'], + ramdisk=module.params['ramdisk'], + **module.params['properties']) + + if module.params['state'] == 'absent': + if not image: + module.exit_json(changed=False, result="success") + else: + _glance_delete_image(module, module.params, cloud.glance_client) + changed = True + + module.exit_json(changed=changed, id=image.id, result="success") + + except shade.OpenStackCloudException as e: + module.fail_json(msg=e.message, extra_data=e.extra_data) + +# this is magic, see lib/ansible/module_common.py +from ansible.module_utils.basic import * +from ansible.module_utils.openstack import * +main() From bd26df1c578b0c703daa52ea8c3c8a693320594f Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Wed, 1 Apr 2015 10:09:09 -0400 Subject: [PATCH 178/464] Don't update image properties if we're deleting --- cloud/openstack/os_image.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/cloud/openstack/os_image.py b/cloud/openstack/os_image.py index bdb8755e84c..7842137f7d9 100644 --- a/cloud/openstack/os_image.py +++ b/cloud/openstack/os_image.py @@ -169,13 +169,13 @@ def main(): module.exit_json(changed=changed, result=result) image = cloud.get_image(name_or_id=result['id']) - cloud.update_image_properties( - image=image, - kernel=module.params['kernel'], - ramdisk=module.params['ramdisk'], - **module.params['properties']) + cloud.update_image_properties( + image=image, + kernel=module.params['kernel'], + ramdisk=module.params['ramdisk'], + **module.params['properties']) - if module.params['state'] == 'absent': + elif module.params['state'] == 'absent': if not image: module.exit_json(changed=False, result="success") else: From 
3ffebeac5b575721660783fb15ece6713aad44a2 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Wed, 1 Apr 2015 10:43:29 -0400 Subject: [PATCH 179/464] Use the delete method from shade Sigh. Turns out glance v1 and glance v2 delete images differently too. --- cloud/openstack/os_image.py | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/cloud/openstack/os_image.py b/cloud/openstack/os_image.py index 7842137f7d9..7e97b3800a9 100644 --- a/cloud/openstack/os_image.py +++ b/cloud/openstack/os_image.py @@ -116,16 +116,6 @@ EXAMPLES = ''' import time -def _glance_delete_image(module, params, client): - try: - for image in client.images.list(): - if image.name == params['name']: - client.images.delete(image) - except Exception, e: - module.fail_json(msg="Error in deleting image: %s" % e.message) - module.exit_json(changed=True, result="Deleted") - - def main(): argument_spec = openstack_full_argument_spec( @@ -179,7 +169,10 @@ def main(): if not image: module.exit_json(changed=False, result="success") else: - _glance_delete_image(module, module.params, cloud.glance_client) + cloud.delete_image( + name_or_id=module.params['name'], + wait=module.params['wait'], + timeout=module.params['timeout']) changed = True module.exit_json(changed=changed, id=image.id, result="success") From 86fc12e27900ff0f5d5a5842fa663d69058c3a65 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Fri, 5 Jun 2015 16:01:15 -0400 Subject: [PATCH 180/464] Update doc params and version_added --- cloud/openstack/os_image.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/cloud/openstack/os_image.py b/cloud/openstack/os_image.py index 7e97b3800a9..ffc99064ad2 100644 --- a/cloud/openstack/os_image.py +++ b/cloud/openstack/os_image.py @@ -1,6 +1,6 @@ #!/usr/bin/python -# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. +# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. 
# Copyright (c) 2013, Benno Joy # # This module is free software: you can redistribute it and/or modify @@ -29,7 +29,7 @@ DOCUMENTATION = ''' module: os_image short_description: Add/Delete images from OpenStack Cloud extends_documentation_fragment: openstack -version_added: "1.10" +version_added: "2.0" description: - Add or Remove images from the OpenStack Image Repository options: @@ -86,18 +86,21 @@ options: properties: description: - Additional properties to be associated with this image -requirements: ["shade"] + required: false + default: {} state: description: - Should the resource be present or absent. choices: [present, absent] default: present +requirements: ["shade"] ''' EXAMPLES = ''' # Upload an image from a local file named cirros-0.3.0-x86_64-disk.img - os_image: auth: + auth_url: http://localhost/auth/v2.0 username: admin password: passme project_name: admin @@ -113,8 +116,6 @@ EXAMPLES = ''' distro: ubuntu ''' -import time - def main(): From 4e6f879febf771fa81af8782367ca9eae277a452 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Tue, 31 Mar 2015 19:28:02 -0400 Subject: [PATCH 181/464] Add Ironic module --- cloud/openstack/os_ironic.py | 212 +++++++++++++++++++++++++++++++++++ 1 file changed, 212 insertions(+) create mode 100644 cloud/openstack/os_ironic.py diff --git a/cloud/openstack/os_ironic.py b/cloud/openstack/os_ironic.py new file mode 100644 index 00000000000..3f28a5b78dc --- /dev/null +++ b/cloud/openstack/os_ironic.py @@ -0,0 +1,212 @@ +#!/usr/bin/python +# coding: utf-8 -*- + +# (c) 2014, Hewlett-Packard Development Company, L.P. +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . + +try: + import shade + HAS_SHADE = True +except ImportError: + HAS_SHADE = False + +# TODO FIX UUID/Add node support +DOCUMENTATION = ''' +--- +module: os_ironic +short_description: Create/Delete Bare Metal Resources from OpenStack +version_added: "1.10" +extends_documentation_fragment: openstack +description: + - Create or Remove Ironic nodes from OpenStack. +options: + state: + description: + - Indicates desired state of the resource + choices: ['present', 'absent'] + default: present + uuid: + description: + - globally unique identifier (UUID) to be given to the resource. Will + be auto-generated if not specified. + required: false + default: None + driver: + description: + - The name of the Ironic Driver to use with this node. + required: true + default: None + ironic_url: + description: + - If noauth mode is utilized, this is required to be set to the + endpoint URL for the Ironic API. Use with "auth" and "auth_plugin" + settings set to None. + required: false + default: None + driver_info: + description: + - Information for this server's driver. Will vary based on which + driver is in use. Any sub-field which is populated will be validated + during creation. + power: + - Information necessary to turn this server on / off. This often + includes such things as IPMI username, password, and IP address. + required: true + deploy: + - Information necessary to deploy this server directly, without + using Nova. THIS IS NOT RECOMMENDED. + console: + - Information necessary to connect to this server's serial console. + Not all drivers support this. 
+ management: + - Information necessary to interact with this server's management + interface. May be shared by power_info in some cases. + required: true + nics: + description: + - A list of network interface cards, eg, " - mac: aa:bb:cc:aa:bb:cc" + required: true + properties: + description: + - Definition of the physical characteristics of this server, used for + scheduling purposes + cpu_arch: + description: + - CPU architecture (x86_64, i686, ...) + default: x86_64 + cpus: + description: + - Number of CPU cores this machine has + default: 1 + ram: + description: + - amount of RAM this machine has, in MB + default: 1 + disk_size: + description: + - size of first storage device in this machine (typically + /dev/sda), in GB + default: 1 + +requirements: ["shade"] +''' + +EXAMPLES = ''' +# Enroll a node with some basic properties and driver info +- os_ironic: + cloud: "devstack" + driver: "pxe_ipmitool" + uuid: "a8cb6624-0d9f-4882-affc-046ebb96ec92" + properties: + cpus: 2 + cpu_arch: "x86_64" + ram: 8192 + disk_size: 64 + nics: + - mac: "aa:bb:cc:aa:bb:cc" + - mac: "dd:ee:ff:dd:ee:ff" + driver_info: + power: + ipmi_address: "1.2.3.4" + ipmi_username: "admin" + ipmi_password: "adminpass" + +''' + + +def _parse_properties(module): + p = module.params['properties'] + props = dict( + cpu_arch=p.get('cpu_arch') if p.get('cpu_arch') else 'x86_64', + cpus=p.get('cpus') if p.get('cpus') else 1, + memory_mb=p.get('ram') if p.get('ram') else 1, + local_gb=p.get('disk_size') if p.get('disk_size') else 1, + ) + return props + + +def _parse_driver_info(module): + p = module.params['driver_info'] + info = p.get('power') + if not info: + raise shade.OpenStackCloudException( + "driver_info['power'] is required") + if p.get('console'): + info.update(p.get('console')) + if p.get('management'): + info.update(p.get('management')) + if p.get('deploy'): + info.update(p.get('deploy')) + return info + + +def main(): + argument_spec = openstack_full_argument_spec( + 
uuid=dict(required=False), + driver=dict(required=True), + driver_info=dict(type='dict', required=True), + nics=dict(type='list', required=True), + properties=dict(type='dict', default={}), + ironic_url=dict(required=False), + ) + module_kwargs = openstack_module_kwargs() + module = AnsibleModule(argument_spec, **module_kwargs) + + if not HAS_SHADE: + module.fail_json(msg='shade is required for this module') + if (module.params['auth_plugin'] == 'None' and + module.params['ironic_url'] is None): + module.fail_json(msg="Authentication appears disabled, Please " + "define an ironic_url parameter") + + if module.params['ironic_url'] and module.params['auth_plugin'] == 'None': + module.params['auth'] = dict(endpoint=module.params['ironic_url']) + try: + cloud = shade.operator_cloud(**module.params) + server = cloud.get_machine_by_uuid(module.params['uuid']) + + if module.params['state'] == 'present': + properties = _parse_properties(module) + driver_info = _parse_driver_info(module) + kwargs = dict( + uuid=module.params['uuid'], + driver=module.params['driver'], + properties=properties, + driver_info=driver_info, + ) + if server is None: + server = cloud.register_machine(module.params['nics'], + **kwargs) + module.exit_json(changed=True, uuid=server.uuid) + else: + # TODO: compare properties here and update if necessary + # ... but the interface for that is terrible! 
+ module.exit_json(changed=False, + result="Server already present") + if module.params['state'] == 'absent': + if server is not None: + cloud.unregister_machine(module.params['nics'], + module.params['uuid']) + module.exit_json(changed=True, result="deleted") + else: + module.exit_json(changed=False, result="Server not found") + except shade.OpenStackCloudException as e: + module.fail_json(msg=e.message) + + +# this is magic, see lib/ansible/module_common.py +from ansible.module_utils.basic import * +from ansible.module_utils.openstack import * +main() From c040ae5374cfddab58f21fda3949c6eaf31268b9 Mon Sep 17 00:00:00 2001 From: Julia Kreger Date: Fri, 1 May 2015 09:47:42 -0400 Subject: [PATCH 182/464] Updating os_ironic module Updating os_ironic module to the most recent version accounting for changes in Ansible devel branch and the shade library since the original creation of the module. --- cloud/openstack/os_ironic.py | 180 +++++++++++++++++++++++++++++++---- 1 file changed, 159 insertions(+), 21 deletions(-) diff --git a/cloud/openstack/os_ironic.py b/cloud/openstack/os_ironic.py index 3f28a5b78dc..e74376a3983 100644 --- a/cloud/openstack/os_ironic.py +++ b/cloud/openstack/os_ironic.py @@ -22,12 +22,11 @@ try: except ImportError: HAS_SHADE = False -# TODO FIX UUID/Add node support +import jsonpatch DOCUMENTATION = ''' --- module: os_ironic short_description: Create/Delete Bare Metal Resources from OpenStack -version_added: "1.10" extends_documentation_fragment: openstack description: - Create or Remove Ironic nodes from OpenStack. @@ -40,7 +39,13 @@ options: uuid: description: - globally unique identifier (UUID) to be given to the resource. Will - be auto-generated if not specified. + be auto-generated if not specified, and name is specified. + - Definition of a UUID will always take precedence to a name value. + required: false + default: None + name: + description: + - unique name identifier to be given to the resource. 
required: false default: None driver: @@ -48,10 +53,15 @@ options: - The name of the Ironic Driver to use with this node. required: true default: None + chassis_uuid: + description: + - Associate the node with a pre-defined chassis. + required: false + default: None ironic_url: description: - If noauth mode is utilized, this is required to be set to the - endpoint URL for the Ironic API. Use with "auth" and "auth_plugin" + endpoint URL for the Ironic API. Use with "auth" and "auth_type" settings set to None. required: false default: None @@ -99,8 +109,17 @@ options: - size of first storage device in this machine (typically /dev/sda), in GB default: 1 + skip_update_of_driver_password: + description: + - Allows the code that would assert changes to nodes to skip the + update if the change is a single line consisting of the password + field. As of Kilo, by default, passwords are always masked to API + requests, which means the logic as a result always attempts to + re-assert the password field. 
+ required: false + default: false -requirements: ["shade"] +requirements: ["shade", "jsonpatch"] ''' EXAMPLES = ''' @@ -108,7 +127,7 @@ EXAMPLES = ''' - os_ironic: cloud: "devstack" driver: "pxe_ipmitool" - uuid: "a8cb6624-0d9f-4882-affc-046ebb96ec92" + uuid: "00000000-0000-0000-0000-000000000002" properties: cpus: 2 cpu_arch: "x86_64" @@ -122,6 +141,7 @@ EXAMPLES = ''' ipmi_address: "1.2.3.4" ipmi_username: "admin" ipmi_password: "adminpass" + chassis_uuid: "00000000-0000-0000-0000-000000000001" ''' @@ -152,56 +172,174 @@ def _parse_driver_info(module): return info +def _choose_id_value(module): + if module.params['uuid']: + return module.params['uuid'] + if module.params['name']: + return module.params['name'] + return None + + +def _is_value_true(value): + true_values = [True, 'yes', 'Yes', 'True', 'true'] + if value in true_values: + return True + return False + + +def _choose_if_password_only(module, patch): + if len(patch) is 1: + if 'password' in patch[0]['path'] and _is_value_true( + module.params['skip_update_of_masked_password']): + # Return false to aabort update as the password appears + # to be the only element in the patch. 
+ return False + return True + + +def _exit_node_not_updated(module, server): + module.exit_json( + changed=False, + result="Node not updated", + uuid=server['uuid'], + provision_state=server['provision_state'] + ) + + def main(): argument_spec = openstack_full_argument_spec( uuid=dict(required=False), - driver=dict(required=True), + name=dict(required=False), + driver=dict(required=False), driver_info=dict(type='dict', required=True), nics=dict(type='list', required=True), properties=dict(type='dict', default={}), ironic_url=dict(required=False), + chassis_uuid=dict(required=False), + skip_update_of_masked_password=dict(required=False, choices=BOOLEANS), + state=dict(required=False, default='present') ) module_kwargs = openstack_module_kwargs() module = AnsibleModule(argument_spec, **module_kwargs) if not HAS_SHADE: module.fail_json(msg='shade is required for this module') - if (module.params['auth_plugin'] == 'None' and + if (module.params['auth_type'] in [None, 'None'] and module.params['ironic_url'] is None): - module.fail_json(msg="Authentication appears disabled, Please " - "define an ironic_url parameter") + module.fail_json(msg="Authentication appears to be disabled, " + "Please define an ironic_url parameter") + + if (module.params['ironic_url'] and + module.params['auth_type'] in [None, 'None']): + module.params['auth'] = dict( + endpoint=module.params['ironic_url'] + ) + + node_id = _choose_id_value(module) - if module.params['ironic_url'] and module.params['auth_plugin'] == 'None': - module.params['auth'] = dict(endpoint=module.params['ironic_url']) try: cloud = shade.operator_cloud(**module.params) - server = cloud.get_machine_by_uuid(module.params['uuid']) - + server = cloud.get_machine(node_id) if module.params['state'] == 'present': + if module.params['driver'] is None: + module.fail_json(msg="A driver must be defined in order " + "to set a node to present.") + properties = _parse_properties(module) driver_info = _parse_driver_info(module) kwargs = 
dict( - uuid=module.params['uuid'], driver=module.params['driver'], properties=properties, driver_info=driver_info, + name=module.params['name'], ) + + if module.params['chassis_uuid']: + kwargs['chassis_uuid'] = module.params['chassis_uuid'] + if server is None: + # Note(TheJulia): Add a specific UUID to the request if + # present in order to be able to re-use kwargs for if + # the node already exists logic, since uuid cannot be + # updated. + if module.params['uuid']: + kwargs['uuid'] = module.params['uuid'] + server = cloud.register_machine(module.params['nics'], **kwargs) - module.exit_json(changed=True, uuid=server.uuid) + module.exit_json(changed=True, uuid=server['uuid'], + provision_state=server['provision_state']) else: - # TODO: compare properties here and update if necessary - # ... but the interface for that is terrible! - module.exit_json(changed=False, - result="Server already present") + # TODO(TheJulia): Presently this does not support updating + # nics. Support needs to be added. + # + # Note(TheJulia): This message should never get logged + # however we cannot realistically proceed if neither a + # name or uuid was supplied to begin with. + if not node_id: + module.fail_json(msg="A uuid or name value " + "must be defined") + + # Note(TheJulia): Constructing the configuration to compare + # against. The items listed in the server_config block can + # be updated via the API. + + server_config = dict( + driver=server['driver'], + properties=server['properties'], + driver_info=server['driver_info'], + name=server['name'], + ) + + # Add the pre-existing chassis_uuid only if + # it is present in the server configuration. + if hasattr(server, 'chassis_uuid'): + server_config['chassis_uuid'] = server['chassis_uuid'] + + # Note(TheJulia): If a password is defined and concealed, a + # patch will always be generated and re-asserted. 
+ patch = jsonpatch.JsonPatch.from_diff(server_config, kwargs) + + if not patch: + _exit_node_not_updated(module, server) + elif _choose_if_password_only(module, list(patch)): + # Note(TheJulia): Normally we would allow the general + # exception catch below, however this allows a specific + # message. + try: + server = cloud.patch_machine( + server['uuid'], + list(patch)) + except Exception as e: + module.fail_json(msg="Failed to update node, " + "Error: %s" % e.message) + + # Enumerate out a list of changed paths. + change_list = [] + for change in list(patch): + change_list.append(change['path']) + module.exit_json(changed=True, + result="Node Updated", + changes=change_list, + uuid=server['uuid'], + provision_state=server['provision_state']) + + # Return not updated by default as the conditions were not met + # to update. + _exit_node_not_updated(module, server) + if module.params['state'] == 'absent': + if not node_id: + module.fail_json(msg="A uuid or name value must be defined " + "in order to remove a node.") + if server is not None: cloud.unregister_machine(module.params['nics'], - module.params['uuid']) + server['uuid']) module.exit_json(changed=True, result="deleted") else: module.exit_json(changed=False, result="Server not found") + except shade.OpenStackCloudException as e: module.fail_json(msg=e.message) From fb4732fad20cb1057962909497e8b57abe941e9f Mon Sep 17 00:00:00 2001 From: Hagai Kariti Date: Thu, 4 Jun 2015 11:41:02 +0300 Subject: [PATCH 183/464] ec2_vol: fix race conditions because we handle errors before actually deleting Just try to delete the volume and handle the error amazon sends --- cloud/amazon/ec2_vol.py | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/cloud/amazon/ec2_vol.py b/cloud/amazon/ec2_vol.py index 3065b550457..d5b4dd4f3bc 100644 --- a/cloud/amazon/ec2_vol.py +++ b/cloud/amazon/ec2_vol.py @@ -239,15 +239,14 @@ def get_volumes(module, ec2): return vols def delete_volume(module, ec2): - vol = 
get_volume(module, ec2) - if not vol: - module.exit_json(changed=False) - else: - if vol.attachment_state() is not None: - adata = vol.attach_data - module.fail_json(msg="Volume %s is attached to an instance %s." % (vol.id, adata.instance_id)) - ec2.delete_volume(vol.id) - module.exit_json(changed=True) + volume_id = module.params['id'] + try: + ec2.delete_volume(volume_id) + module.exit_json(changed=True) + except boto.exception.EC2ResponseError as ec2_error: + if ec2_error.code == 'InvalidVolume.NotFound': + module.exit_json(changed=False) + module.fail_json(msg=ec2_error.message) def boto_supports_volume_encryption(): """ From 361a1e1b65e65fff29a9fb8555e7559b54545e9e Mon Sep 17 00:00:00 2001 From: Igor Khomyakov Date: Fri, 9 Jan 2015 16:57:20 +0300 Subject: [PATCH 184/464] Check if a service exists --- web_infrastructure/supervisorctl.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/web_infrastructure/supervisorctl.py b/web_infrastructure/supervisorctl.py index f75992b9a6a..f0cfa691c27 100644 --- a/web_infrastructure/supervisorctl.py +++ b/web_infrastructure/supervisorctl.py @@ -30,7 +30,7 @@ version_added: "0.7" options: name: description: - - The name of the supervisord program or group to manage. + - The name of the supervisord program or group to manage. - The name will be taken as group name when it ends with a colon I(:) - Group support is only available in Ansible version 1.6 or later. 
required: true @@ -192,9 +192,14 @@ def main(): if state == 'restarted': rc, out, err = run_supervisorctl('update') processes = get_matched_processes() + if not processes: + module.fail_json(name=name, msg="ERROR (no such process)") + take_action_on_processes(processes, lambda s: True, 'restart', 'started') processes = get_matched_processes() + if not processes: + module.fail_json(name=name, msg="ERROR (no such process)") if state == 'present': if len(processes) > 0: From 9074aa4c937c2618136e703703118cbda88030bf Mon Sep 17 00:00:00 2001 From: Tal Auslander Date: Mon, 8 Jun 2015 14:45:20 +0300 Subject: [PATCH 185/464] win_get_url re-download file only if modified --- windows/win_get_url.ps1 | 38 +++++++++++++++++++++++++++++++------- 1 file changed, 31 insertions(+), 7 deletions(-) diff --git a/windows/win_get_url.ps1 b/windows/win_get_url.ps1 index b555cc7a52c..96189d69113 100644 --- a/windows/win_get_url.ps1 +++ b/windows/win_get_url.ps1 @@ -1,7 +1,7 @@ #!powershell # This file is part of Ansible. 
# -# Copyright 2014, Paul Durivage +# (c)) 2015, Paul Durivage , Tal Auslander # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -40,14 +40,38 @@ Else { Fail-Json $result "missing required argument: dest" } -$client = New-Object System.Net.WebClient +If (-not (Test-Path $dest)) { + $client = New-Object System.Net.WebClient -Try { - $client.DownloadFile($url, $dest) - $result.changed = $true + Try { + $client.DownloadFile($url, $dest) + $result.changed = $true + } + Catch { + Fail-Json $result "Error downloading $url to $dest" + } } -Catch { - Fail-Json $result "Error downloading $url to $dest" +Else { + Try { + $webRequest = [System.Net.HttpWebRequest]::Create($url) + $webRequest.IfModifiedSince = ([System.IO.FileInfo]$dest).LastWriteTime + $webRequest.Method = "GET" + [System.Net.HttpWebResponse]$webResponse = $webRequest.GetResponse() + + $stream = New-Object System.IO.StreamReader($response.GetResponseStream()) + + $stream.ReadToEnd() | Set-Content -Path $dest -Force + + $result.changed = $true + } + Catch [System.Net.WebException] { + If ($_.Exception.Response.StatusCode -ne [System.Net.HttpStatusCode]::NotModified) { + Fail-Json $result "Error downloading $url to $dest" + } + } + Catch { + Fail-Json $result "Error downloading $url to $dest" + } } Set-Attr $result.win_get_url "url" $url From 9f3658d9011d5c0b8655802c54c5b8f38a7c189f Mon Sep 17 00:00:00 2001 From: toninog Date: Mon, 8 Jun 2015 13:55:24 +0100 Subject: [PATCH 186/464] Fixed bug in example where updating a user to a group Fixed bug in create_group and update_group whereby paramaters were not being set correctly. 
--- cloud/amazon/iam.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/cloud/amazon/iam.py b/cloud/amazon/iam.py index a4111ee5477..d496a7a40c7 100644 --- a/cloud/amazon/iam.py +++ b/cloud/amazon/iam.py @@ -133,7 +133,7 @@ task: iam_type: user name: jdavila state: update - group: "{{ item.created_group.group_name }}" + groups: "{{ item.created_group.group_name }}" with_items: new_groups.results ''' @@ -376,7 +376,7 @@ new_name=None): return (groups, changed) -def create_group(module, iam, name, path): +def create_group(module=None, iam=None, name=None, path=None): changed = False try: iam.create_group( @@ -414,8 +414,7 @@ def delete_group(module, iam, name): changed = True return changed, name - -def update_group(module, iam, name, new_name, new_path): +def update_group(module=None, iam=None, name=None, new_name=None, new_path=None): changed = False try: current_group_path = iam.get_group( @@ -667,7 +666,7 @@ def main(): module.exit_json(changed=changed, group_name=new_group) elif state in ['present', 'update'] and group_exists: changed, updated_name, updated_path, cur_path = update_group( - iam, name, new_name, new_path) + iam=iam, name=name, new_name=new_name, new_path=new_path) if new_path and new_name: module.exit_json(changed=changed, old_group_name=name, From 68cfbca624c118d85ee1ee2547755cfd356b156d Mon Sep 17 00:00:00 2001 From: toninog Date: Mon, 8 Jun 2015 14:21:49 +0100 Subject: [PATCH 187/464] Fixed more issues with the delete_group and paramater mismatch --- cloud/amazon/iam.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/cloud/amazon/iam.py b/cloud/amazon/iam.py index d496a7a40c7..3cfca55135c 100644 --- a/cloud/amazon/iam.py +++ b/cloud/amazon/iam.py @@ -133,7 +133,7 @@ task: iam_type: user name: jdavila state: update - groups: "{{ item.created_group.group_name }}" + group: "{{ item.created_group.group_name }}" with_items: new_groups.results ''' @@ -388,7 +388,7 @@ def create_group(module=None, 
iam=None, name=None, path=None): return name, changed -def delete_group(module, iam, name): +def delete_group(module=None, iam=None, name=None): changed = False try: iam.delete_group(name) @@ -662,7 +662,7 @@ def main(): group_exists = name in orig_group_list if state == 'present' and not group_exists: - new_group, changed = create_group(iam, name, path) + new_group, changed = create_group(iam=iam, name=name, path=path) module.exit_json(changed=changed, group_name=new_group) elif state in ['present', 'update'] and group_exists: changed, updated_name, updated_path, cur_path = update_group( @@ -690,7 +690,7 @@ def main(): changed=changed, msg="Update Failed. Group %s doesn't seem to exit!" % name) elif state == 'absent': if name in orig_group_list: - removed_group, changed = delete_group(iam, name) + removed_group, changed = delete_group(iam=iam, name=name) module.exit_json(changed=changed, delete_group=removed_group) else: module.exit_json(changed=changed, msg="Group already absent") From b91a232fe668efb21d3a3dcb1a9ce5cc3587b300 Mon Sep 17 00:00:00 2001 From: Alex Gandy Date: Mon, 8 Jun 2015 12:01:27 -0400 Subject: [PATCH 188/464] Added volumes dict to ec2_lc example --- cloud/amazon/ec2_lc.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/cloud/amazon/ec2_lc.py b/cloud/amazon/ec2_lc.py index 18a736600d0..5259479ab5f 100644 --- a/cloud/amazon/ec2_lc.py +++ b/cloud/amazon/ec2_lc.py @@ -126,6 +126,12 @@ EXAMPLES = ''' key_name: default security_groups: ['group', 'group2' ] instance_type: t1.micro + volumes: + - device_name: /dev/sda1 + volume_size: 100 + device_type: io1 + iops: 3000 + delete_on_termination: true ''' From a9e68692b70a387792aee2cfe487998ff82826cf Mon Sep 17 00:00:00 2001 From: Dionysis Grigoropoulos Date: Tue, 9 Jun 2015 01:30:27 +0300 Subject: [PATCH 189/464] git: Add option to verify GPG signature of commit --- source_control/git.py | 37 +++++++++++++++++++++++++++++++++---- 1 file changed, 33 insertions(+), 4 deletions(-) diff --git 
a/source_control/git.py b/source_control/git.py index 0cb87304a92..5915880abd1 100644 --- a/source_control/git.py +++ b/source_control/git.py @@ -160,6 +160,18 @@ options: main project. This is equivalent to specifying the --remote flag to git submodule update. + verify_commit: + required: false + default: "no" + choices: ["yes", "no"] + version_added: "2.0" + description: + - if C(yes), when cloning or checking out a C(version) verify the + signature of a GPG signed commit. This requires C(git) version>=2.1.0 + to be installed. The commit MUST be signed and the public key MUST + be trusted in the GPG trustdb. + + notes: - "If the task seems to be hanging, first verify remote host is in C(known_hosts). SSH will prompt user to authorize the first contact with a remote host. To avoid this prompt, @@ -298,7 +310,7 @@ def get_submodule_versions(git_path, module, dest, version='HEAD'): return submodules def clone(git_path, module, repo, dest, remote, depth, version, bare, - reference, refspec): + reference, refspec, verify_commit): ''' makes a new git repo if it does not already exist ''' dest_dirname = os.path.dirname(dest) try: @@ -326,6 +338,9 @@ def clone(git_path, module, repo, dest, remote, depth, version, bare, if refspec: module.run_command([git_path, 'fetch', remote, refspec], check_rc=True, cwd=dest) + if verify_commit: + verify_commit_sign(git_path, module, dest, version) + def has_local_mods(module, git_path, dest, bare): if bare: return False @@ -574,7 +589,7 @@ def submodule_update(git_path, module, dest, track_submodules): return (rc, out, err) -def switch_version(git_path, module, dest, remote, version): +def switch_version(git_path, module, dest, remote, version, verify_commit): cmd = '' if version != 'HEAD': if is_remote_branch(git_path, module, dest, remote, version): @@ -599,8 +614,20 @@ def switch_version(git_path, module, dest, remote, version): module.fail_json(msg="Failed to checkout %s" % (version)) else: module.fail_json(msg="Failed to checkout 
branch %s" % (branch)) + + if verify_commit: + verify_commit_sign(git_path, module, dest, version) + return (rc, out1, err1) + +def verify_commit_sign(git_path, module, dest, version): + cmd = "%s verify-commit %s" % (git_path, version) + (rc, out, err) = module.run_command(cmd, cwd=dest) + if rc != 0: + module.fail_json(msg='Failed to verify GPG signature of commit/tag "%s"' % version) + return (rc, out, err) + # =========================================== def main(): @@ -616,6 +643,7 @@ def main(): depth=dict(default=None, type='int'), clone=dict(default='yes', type='bool'), update=dict(default='yes', type='bool'), + verify_commit=dict(default='no', type='bool'), accept_hostkey=dict(default='no', type='bool'), key_file=dict(default=None, required=False), ssh_opts=dict(default=None, required=False), @@ -637,6 +665,7 @@ def main(): update = module.params['update'] allow_clone = module.params['clone'] bare = module.params['bare'] + verify_commit = module.params['verify_commit'] reference = module.params['reference'] git_path = module.params['executable'] or module.get_bin_path('git', True) key_file = module.params['key_file'] @@ -689,7 +718,7 @@ def main(): remote_head = get_remote_head(git_path, module, dest, version, repo, bare) module.exit_json(changed=True, before=before, after=remote_head) # there's no git config, so clone - clone(git_path, module, repo, dest, remote, depth, version, bare, reference, refspec) + clone(git_path, module, repo, dest, remote, depth, version, bare, reference, refspec, verify_commit) repo_updated = True elif not update: # Just return having found a repo already in the dest path @@ -729,7 +758,7 @@ def main(): # switch to version specified regardless of whether # we got new revisions from the repository if not bare: - switch_version(git_path, module, dest, remote, version) + switch_version(git_path, module, dest, remote, version, verify_commit) # Deal with submodules submodules_updated = False From 
438d87d26991c26de4a6b1457b154eb8f50d093a Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 6 Jun 2015 13:41:22 -0400 Subject: [PATCH 190/464] added innitial daemon-reloaded support to service module --- system/service.py | 126 ++++++++++++++++++++++++++-------------------- 1 file changed, 71 insertions(+), 55 deletions(-) diff --git a/system/service.py b/system/service.py index 3299b614d52..ac7b6994389 100644 --- a/system/service.py +++ b/system/service.py @@ -34,12 +34,13 @@ options: - Name of the service. state: required: false - choices: [ started, stopped, restarted, reloaded ] + choices: [ started, stopped, restarted, reloaded, daemon_reloaded ] description: - C(started)/C(stopped) are idempotent actions that will not run commands unless necessary. C(restarted) will always bounce the service. C(reloaded) will always reload. B(At least one of state and enabled are required.) + - The C(daemon_reloaded) state was added in 2.0, it is exclusive for systemd. sleep: required: false version_added: "1.3" @@ -279,7 +280,7 @@ class Service(object): # Find ps binary psbin = self.module.get_bin_path('ps', True) - (rc, psout, pserr) = self.execute_command('%s %s' % (psbin, psflags)) + (rc, psout, pserr) = execute_command('%s %s' % (psbin, psflags)) # If rc is 0, set running as appropriate if rc == 0: self.running = False @@ -1413,7 +1414,7 @@ def main(): module = AnsibleModule( argument_spec = dict( name = dict(required=True), - state = dict(choices=['running', 'started', 'stopped', 'restarted', 'reloaded']), + state = dict(choices=['running', 'started', 'stopped', 'restarted', 'reloaded', 'daemon_reloaded']), sleep = dict(required=False, type='int', default=None), pattern = dict(required=False, default=None), enabled = dict(type='bool'), @@ -1440,66 +1441,81 @@ def main(): result = {} result['name'] = service.name - # Find service management tools - service.get_service_tools() - - # Enable/disable service startup at boot if requested - if 
service.module.params['enabled'] is not None: - # FIXME: ideally this should detect if we need to toggle the enablement state, though - # it's unlikely the changed handler would need to fire in this case so it's a minor thing. - service.service_enable() - result['enabled'] = service.enable - - if module.params['state'] is None: - # Not changing the running state, so bail out now. - result['changed'] = service.changed - module.exit_json(**result) + # shortcut for systemd only daemon-reloaded + if module.params['state'] == 'daemon_reloaded': + cmd = module.get_bin_path('systemctl', True) + svc_cmd = "%s %s %s" % (cmd, service.name, 'daemon-reloaded') + rc, stdout, stderr = module.run_command(svc_cmd) + result['msg']=stdout + if rc != 0: + result['rc'] = rc + if stderr: + result['msg']=stderr + module.fail_json(**result) - result['state'] = service.state + result['changed']=True - # Collect service status - if service.pattern: - service.check_ps() else: - service.get_service_status() + # Find service management tools + service.get_service_tools() + + # Enable/disable service startup at boot if requested + if service.module.params['enabled'] is not None: + # FIXME: ideally this should detect if we need to toggle the enablement state, though + # it's unlikely the changed handler would need to fire in this case so it's a minor thing. + service.service_enable() + result['enabled'] = service.enable + + if module.params['state'] is None: + # Not changing the running state, so bail out now. 
+ result['changed'] = service.changed + module.exit_json(**result) + + result['state'] = service.state + + # Collect service status + if service.pattern: + service.check_ps() + else: + service.get_service_status() - # Calculate if request will change service state - service.check_service_changed() + # Calculate if request will change service state + service.check_service_changed() - # Modify service state if necessary - (rc, out, err) = service.modify_service_state() + # Modify service state if necessary + (rc, out, err) = service.modify_service_state() - if rc != 0: - if err and "Job is already running" in err: - # upstart got confused, one such possibility is MySQL on Ubuntu 12.04 - # where status may report it has no start/stop links and we could - # not get accurate status - pass - else: - if err: - module.fail_json(msg=err) + if rc != 0: + if err and "Job is already running" in err: + # upstart got confused, one such possibility is MySQL on Ubuntu 12.04 + # where status may report it has no start/stop links and we could + # not get accurate status + pass else: - module.fail_json(msg=out) - - result['changed'] = service.changed | service.svc_change - if service.module.params['enabled'] is not None: - result['enabled'] = service.module.params['enabled'] - - if not service.module.params['state']: - status = service.get_service_status() - if status is None: - result['state'] = 'absent' - elif status is False: - result['state'] = 'started' - else: - result['state'] = 'stopped' - else: - # as we may have just bounced the service the service command may not - # report accurate state at this moment so just show what we ran - if service.module.params['state'] in ['started','restarted','running','reloaded']: - result['state'] = 'started' + if err: + module.fail_json(msg=err) + else: + module.fail_json(msg=out) + + result['changed'] = service.changed | service.svc_change + if service.module.params['enabled'] is not None: + result['enabled'] = 
service.module.params['enabled'] + + if not service.module.params['state']: + status = service.get_service_status() + if status is None: + result['state'] = 'absent' + elif status is False: + result['state'] = 'started' + else: + result['state'] = 'stopped' else: - result['state'] = 'stopped' + # as we may have just bounced the service the service command may not + # report accurate state at this moment so just show what we ran + if service.module.params['state'] in ['started','restarted','running','reloaded']: + result['state'] = 'started' + else: + result['state'] = 'stopped' module.exit_json(**result) From d6ed6113a77a6e327cf12d3955022321c5b12efe Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 8 Jun 2015 20:12:10 -0400 Subject: [PATCH 191/464] may root help us all --- packaging/os/package.py | 52 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) create mode 100644 packaging/os/package.py diff --git a/packaging/os/package.py b/packaging/os/package.py new file mode 100644 index 00000000000..38762c65fa6 --- /dev/null +++ b/packaging/os/package.py @@ -0,0 +1,52 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2012, Red Hat, Inc +# Written by Seth Vidal +# (c) 2014, Epic Games, Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# + +DOCUMENTATION = ''' +--- +module: package +version_added: 2.0 +author: Ansible Core Team +short_description: Generic OS package manager +description: + - Installs, upgrade and removes packages using the underlying OS package manager. +options: + name: + description: + - "Package name, or package specifier with version, like C(name-1.0). When using state=latest, this can be '*' which means run: yum -y update. You can also pass a url or a local path to a rpm file. To operate on several packages this can accept a comma separated list of packages or (as of 2.0) a list of packages." + required: true + state: + description: + - Whether to install (C(present), C(latest)), or remove (C(absent)) a package. + required: true +requirements: + - Whatever is required for the package plugins specific for each system. +notes: + - This module actually calls the pertinent package modules for each system (apt, yum, etc). +''' +EXAMPLES = ''' +- name: install the latest version of Vim + package: name=vim-minimal state=latest + +- name: remove the Vim package + package : name=vim-minimal state=absent +''' From 75c2bc54f5e85538213f401531704a7879fe9dbc Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 8 Jun 2015 20:18:59 -0400 Subject: [PATCH 192/464] corrected copyright ... does a copied the copyrightnotice require copyright? --- packaging/os/package.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/packaging/os/package.py b/packaging/os/package.py index 38762c65fa6..1a27470cbc8 100644 --- a/packaging/os/package.py +++ b/packaging/os/package.py @@ -1,9 +1,7 @@ #!/usr/bin/python # -*- coding: utf-8 -*- -# (c) 2012, Red Hat, Inc -# Written by Seth Vidal -# (c) 2014, Epic Games, Inc. 
+# (c) 2015, Ansible, inc # # This file is part of Ansible # From fd18aba076b7b31d8f6e591fab396706d4d7e4c1 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 8 Jun 2015 20:25:39 -0400 Subject: [PATCH 193/464] updated package examples --- packaging/os/package.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/packaging/os/package.py b/packaging/os/package.py index 1a27470cbc8..7c94b98a941 100644 --- a/packaging/os/package.py +++ b/packaging/os/package.py @@ -42,9 +42,10 @@ notes: - This module actually calls the pertinent package modules for each system (apt, yum, etc). ''' EXAMPLES = ''' -- name: install the latest version of Vim - package: name=vim-minimal state=latest +- name: install the latest version of ntpdate + package: name=ntpdate state=latest -- name: remove the Vim package - package : name=vim-minimal state=absent +# This uses a variable as this changes per distribution. +- name: remove the apache package + package : name={{apache}} state=absent ''' From 4b9b047878f20db9b5a37d2e5b0e98bf43ff312f Mon Sep 17 00:00:00 2001 From: Trevor Pounds Date: Thu, 26 Feb 2015 15:38:42 -0800 Subject: [PATCH 194/464] Use auto scaling group managed ELBs if present. 
--- cloud/amazon/ec2_elb.py | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/cloud/amazon/ec2_elb.py b/cloud/amazon/ec2_elb.py index 11abd827b2b..396f9ab0c9b 100644 --- a/cloud/amazon/ec2_elb.py +++ b/cloud/amazon/ec2_elb.py @@ -105,6 +105,7 @@ import os try: import boto import boto.ec2 + import boto.ec2.autoscale import boto.ec2.elb from boto.regioninfo import RegionInfo except ImportError: @@ -255,6 +256,9 @@ class ElbManager: for elb lookup instead of returning what elbs are attached to self.instance_id""" + if not ec2_elbs: + ec2_elbs = self._get_auto_scaling_group_lbs() + try: elb = connect_to_aws(boto.ec2.elb, self.region, **self.aws_connect_params) @@ -273,6 +277,32 @@ class ElbManager: lbs.append(lb) return lbs + def _get_auto_scaling_group_lbs(self): + """Returns a list of ELBs associated with self.instance_id + indirectly through its auto scaling group membership""" + + try: + asg = connect_to_aws(boto.ec2.autoscale, self.region, **self.aws_connect_params) + except (boto.exception.NoAuthHandlerFound, StandardError), e: + self.module.fail_json(msg=str(e)) + + asg_instances = asg.get_all_autoscaling_instances([self.instance_id]) + if len(asg_instances) > 1: + self.module.fail_json(msg="Illegal state, expected one auto scaling group instance.") + + if not asg_instances: + asg_elbs = [] + else: + asg_name = asg_instances[0].group_name + + asgs = asg.get_all_groups([asg_name]) + if len(asg_instances) != 1: + self.module.fail_json(msg="Illegal state, expected one auto scaling group.") + + asg_elbs = asgs[0].load_balancers + + return asg_elbs + def _get_instance(self): """Returns a boto.ec2.InstanceObject for self.instance_id""" try: From 1730764531f07d028a9c4e96f20f1c03a1949606 Mon Sep 17 00:00:00 2001 From: Tal Auslander Date: Tue, 9 Jun 2015 11:14:44 +0300 Subject: [PATCH 195/464] add force parameter to win_get_url --- windows/win_get_url.ps1 | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git 
a/windows/win_get_url.ps1 b/windows/win_get_url.ps1 index 96189d69113..e5e1ea73c83 100644 --- a/windows/win_get_url.ps1 +++ b/windows/win_get_url.ps1 @@ -40,7 +40,9 @@ Else { Fail-Json $result "missing required argument: dest" } -If (-not (Test-Path $dest)) { +$force = Get-Attr -obj $params -name "force" "no" + +If ($force -eq "yes" -or -not (Test-Path $dest)) { $client = New-Object System.Net.WebClient Try { From fccc925ec51cc4c02236f1c5fbd3c1cfe94c0374 Mon Sep 17 00:00:00 2001 From: toninog Date: Tue, 9 Jun 2015 15:27:15 +0100 Subject: [PATCH 196/464] fixes to code to enable updates of user to groups and delete groups. Fixed example yaml to use groups --- cloud/amazon/iam.py | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/cloud/amazon/iam.py b/cloud/amazon/iam.py index a4111ee5477..70ae9ba75a5 100644 --- a/cloud/amazon/iam.py +++ b/cloud/amazon/iam.py @@ -133,7 +133,7 @@ task: iam_type: user name: jdavila state: update - group: "{{ item.created_group.group_name }}" + groups: "{{ item.created_group.group_name }}" with_items: new_groups.results ''' @@ -376,7 +376,7 @@ new_name=None): return (groups, changed) -def create_group(module, iam, name, path): +def create_group(module=None, iam=None, name=None, path=None): changed = False try: iam.create_group( @@ -388,7 +388,7 @@ def create_group(module, iam, name, path): return name, changed -def delete_group(module, iam, name): +def delete_group(module=None, iam=None, name=None): changed = False try: iam.delete_group(name) @@ -414,8 +414,7 @@ def delete_group(module, iam, name): changed = True return changed, name - -def update_group(module, iam, name, new_name, new_path): +def update_group(module=None, iam=None, name=None, new_name=None, new_path=None): changed = False try: current_group_path = iam.get_group( @@ -663,11 +662,11 @@ def main(): group_exists = name in orig_group_list if state == 'present' and not group_exists: - new_group, changed = create_group(iam, name, path) + 
new_group, changed = create_group(iam=iam, name=name, path=path) module.exit_json(changed=changed, group_name=new_group) elif state in ['present', 'update'] and group_exists: changed, updated_name, updated_path, cur_path = update_group( - iam, name, new_name, new_path) + iam=iam, name=name, new_name=new_name, new_path=new_path) if new_path and new_name: module.exit_json(changed=changed, old_group_name=name, @@ -691,7 +690,7 @@ def main(): changed=changed, msg="Update Failed. Group %s doesn't seem to exit!" % name) elif state == 'absent': if name in orig_group_list: - removed_group, changed = delete_group(iam, name) + removed_group, changed = delete_group(iam=iam, name=name) module.exit_json(changed=changed, delete_group=removed_group) else: module.exit_json(changed=changed, msg="Group already absent") From c298741aa64bda04be2d317f1e0fd0fa5abb21f6 Mon Sep 17 00:00:00 2001 From: David Shrewsbury Date: Tue, 9 Jun 2015 15:24:38 -0400 Subject: [PATCH 197/464] Update for latest shade API Shade version 0.7.0 introduces new API methods for creating, deleting, and updating security groups. Let's use those and clean up the module. 
--- cloud/openstack/os_security_group.py | 101 ++++++++++++++++----------- 1 file changed, 62 insertions(+), 39 deletions(-) diff --git a/cloud/openstack/os_security_group.py b/cloud/openstack/os_security_group.py index 193a156251a..55422ac20a3 100644 --- a/cloud/openstack/os_security_group.py +++ b/cloud/openstack/os_security_group.py @@ -18,8 +18,9 @@ try: import shade + HAS_SHADE = True except ImportError: - print("failed=True msg='shade is required for this module'") + HAS_SHADE = False DOCUMENTATION = ''' @@ -51,58 +52,80 @@ requirements: ["shade"] EXAMPLES = ''' # Create a security group -- os_security_group: cloud=mordred name=foo - description=security group for foo servers +- os_security_group: + cloud=mordred + name=foo + description=security group for foo servers ''' -def _security_group(module, nova_client, action='create', **kwargs): - f = getattr(nova_client.security_groups, action) - try: - secgroup = f(**kwargs) - except Exception, e: - module.fail_json(msg='Failed to %s security group %s: %s' % - (action, module.params['name'], e.message)) +def _needs_update(module, secgroup): + """Check for differences in the updatable values. + NOTE: We don't currently allow name updates. 
+ """ + if secgroup['description'] != module.params['description']: + return True + return False -def main(): +def _system_state_change(module, secgroup): + state = module.params['state'] + if state == 'present': + if not secgroup: + return True + return _needs_update(module, secgroup) + if state == 'absent' and secgroup: + return True + return False + + +def main(): argument_spec = openstack_full_argument_spec( - name = dict(required=True), - description = dict(default=None), - state = dict(default='present', choices=['absent', 'present']), + name=dict(required=True), + description=dict(default=None), + state=dict(default='present', choices=['absent', 'present']), ) + module_kwargs = openstack_module_kwargs() - module = AnsibleModule(argument_spec, **module_kwargs) + module = AnsibleModule(argument_spec, + supports_check_mode=True, + **module_kwargs) + + if not HAS_SHADE: + module.fail_json(msg='shade is required for this module') + + name = module.params['name'] + state = module.params['state'] + description = module.params['description'] try: cloud = shade.openstack_cloud(**module.params) - nova_client = cloud.nova_client - changed = False - secgroup = cloud.get_security_group(module.params['name']) + secgroup = cloud.get_security_group(name) - if module.params['state'] == 'present': - secgroup = cloud.get_security_group(module.params['name']) + if module.check_mode: + module.exit_json(changed=_system_state_change(module, secgroup)) + + if state == 'present': + if not secgroup: + secgroup = cloud.create_security_group(name, description) + module.exit_json(changed=True, result='created', + id=secgroup['id']) + else: + if _needs_update(module, secgroup): + secgroup = cloud.update_security_group( + secgroup['id'], description=description) + module.exit_json(changed=True, result='updated', + id=secgroup['id']) + else: + module.exit_json(changed=False, result='success') + + if state == 'absent': if not secgroup: - _security_group(module, nova_client, action='create', 
- name=module.params['name'], - description=module.params['description']) - changed = True - - if secgroup and secgroup.description != module.params['description']: - _security_group(module, nova_client, action='update', - group=secgroup.id, - name=module.params['name'], - description=module.params['description']) - changed = True - - if module.params['state'] == 'absent': - if secgroup: - _security_group(module, nova_client, action='delete', - group=secgroup.id) - changed = True - - module.exit_json(changed=changed, id=module.params['name'], result="success") + module.exit_json(changed=False, result='success') + else: + cloud.delete_security_group(secgroup['id']) + module.exit_json(changed=True, result='deleted') except shade.OpenStackCloudException as e: module.fail_json(msg=e.message) From 5be1b64b85b58b5c2777485394205ce0190bb1e7 Mon Sep 17 00:00:00 2001 From: David Shrewsbury Date: Tue, 9 Jun 2015 16:18:38 -0400 Subject: [PATCH 198/464] Update the docstring for os_security_group Indicate that idempotence is on security group names, and give an example for updating a security group description. --- cloud/openstack/os_security_group.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/cloud/openstack/os_security_group.py b/cloud/openstack/os_security_group.py index 55422ac20a3..bf316962a39 100644 --- a/cloud/openstack/os_security_group.py +++ b/cloud/openstack/os_security_group.py @@ -34,7 +34,8 @@ description: options: name: description: - - Name that has to be given to the security group + - Name that has to be given to the security group. This module + requires that security group names be unique. 
required: true description: description: @@ -54,8 +55,16 @@ EXAMPLES = ''' # Create a security group - os_security_group: cloud=mordred + state=present name=foo description=security group for foo servers + +# Update the existing 'foo' security group description +- os_security_group: + cloud=mordred + state=present + name=foo + description=updated description for the foo security group ''' From 62073565e1bec66b22450d0495b476fd8d8419e6 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Tue, 9 Jun 2015 19:43:27 -0400 Subject: [PATCH 199/464] Update version added --- cloud/openstack/os_ironic.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cloud/openstack/os_ironic.py b/cloud/openstack/os_ironic.py index e74376a3983..137effe6073 100644 --- a/cloud/openstack/os_ironic.py +++ b/cloud/openstack/os_ironic.py @@ -28,6 +28,7 @@ DOCUMENTATION = ''' module: os_ironic short_description: Create/Delete Bare Metal Resources from OpenStack extends_documentation_fragment: openstack +version_added: "2.0" description: - Create or Remove Ironic nodes from OpenStack. 
options: From d60d49391629dc692185ad4fa148f0f6f29d7a7a Mon Sep 17 00:00:00 2001 From: Louis-Michel Couture Date: Wed, 10 Jun 2015 01:56:32 -0400 Subject: [PATCH 200/464] Update example to remove outdated information --- database/mysql/mysql_user.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/database/mysql/mysql_user.py b/database/mysql/mysql_user.py index 97150775507..c6d34ea0635 100644 --- a/database/mysql/mysql_user.py +++ b/database/mysql/mysql_user.py @@ -149,8 +149,6 @@ mydb.*:INSERT,UPDATE/anotherdb.*:SELECT/yetanotherdb.*:ALL - mysql_user: name=root password=abc123 login_unix_socket=/var/run/mysqld/mysqld.sock # Example .my.cnf file for setting the root password -# Note: don't use quotes around the password, because the mysql_user module -# will include them in the password but the mysql client will not [client] user=root From 16ba1fc0e97a7e25d6c70862c3264fb028454b15 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 10 Jun 2015 11:08:12 -0400 Subject: [PATCH 201/464] updated version added --- windows/win_copy.py | 2 +- windows/win_template.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/windows/win_copy.py b/windows/win_copy.py index 7d0b49e5985..16b6859488f 100644 --- a/windows/win_copy.py +++ b/windows/win_copy.py @@ -24,7 +24,7 @@ import time DOCUMENTATION = ''' --- module: win_copy -version_added: "1.8" +version_added: "2.0" short_description: Copies files to remote locations on windows hosts. description: - The M(win_copy) module copies a file on the local box to remote windows locations. diff --git a/windows/win_template.py b/windows/win_template.py index 402702f93b2..d95d1125fcc 100644 --- a/windows/win_template.py +++ b/windows/win_template.py @@ -3,7 +3,7 @@ DOCUMENTATION = ''' --- module: win_template -version_added: 1.8 +version_added: "2.0" short_description: Templates a file out to a remote server. 
description: - Templates are processed by the Jinja2 templating language From 74e40b5fe14a585b1a681f0909f153a8002bc5c0 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 10 Jun 2015 12:42:15 -0400 Subject: [PATCH 202/464] fixed doc issue with git --- source_control/git.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/source_control/git.py b/source_control/git.py index 5915880abd1..d1ed929a68e 100644 --- a/source_control/git.py +++ b/source_control/git.py @@ -167,9 +167,9 @@ options: version_added: "2.0" description: - if C(yes), when cloning or checking out a C(version) verify the - signature of a GPG signed commit. This requires C(git) version>=2.1.0 - to be installed. The commit MUST be signed and the public key MUST - be trusted in the GPG trustdb. + signature of a GPG signed commit. This requires C(git) version>=2.1.0 + to be installed. The commit MUST be signed and the public key MUST + be trusted in the GPG trustdb. notes: From 9acc7c402f729748205e78f2b66b8f25b7552e37 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 10 Jun 2015 12:53:34 -0400 Subject: [PATCH 203/464] Revert "added innitial daemon-reloaded support to service module" This reverts commit 438d87d26991c26de4a6b1457b154eb8f50d093a. --- system/service.py | 126 ++++++++++++++++++++-------------------------- 1 file changed, 55 insertions(+), 71 deletions(-) diff --git a/system/service.py b/system/service.py index ac7b6994389..3299b614d52 100644 --- a/system/service.py +++ b/system/service.py @@ -34,13 +34,12 @@ options: - Name of the service. state: required: false - choices: [ started, stopped, restarted, reloaded, daemon_reloaded ] + choices: [ started, stopped, restarted, reloaded ] description: - C(started)/C(stopped) are idempotent actions that will not run commands unless necessary. C(restarted) will always bounce the service. C(reloaded) will always reload. B(At least one of state and enabled are required.) 
- - The C(daemon_reloaded) state was added in 2.0, it is exclusive for systemd. sleep: required: false version_added: "1.3" @@ -280,7 +279,7 @@ class Service(object): # Find ps binary psbin = self.module.get_bin_path('ps', True) - (rc, psout, pserr) = execute_command('%s %s' % (psbin, psflags)) + (rc, psout, pserr) = self.execute_command('%s %s' % (psbin, psflags)) # If rc is 0, set running as appropriate if rc == 0: self.running = False @@ -1414,7 +1413,7 @@ def main(): module = AnsibleModule( argument_spec = dict( name = dict(required=True), - state = dict(choices=['running', 'started', 'stopped', 'restarted', 'reloaded', 'daemon_reloaded']), + state = dict(choices=['running', 'started', 'stopped', 'restarted', 'reloaded']), sleep = dict(required=False, type='int', default=None), pattern = dict(required=False, default=None), enabled = dict(type='bool'), @@ -1441,81 +1440,66 @@ def main(): result = {} result['name'] = service.name - # shortcut for systemd only daemon-reloaded - if module.params['state'] == 'daemon_reloaded': - cmd = module.get_bin_path('systemctl', True) - svc_cmd = "%s %s %s" % (cmd, service.name, 'daemon-reloaded') - rc, stdout, stderr = module.run_command(svc_cmd) - result['msg']=stdout - if rc != 0: - result['rc'] = rc - if stderr: - result['msg']=stderr - module.fail_json(**result) + # Find service management tools + service.get_service_tools() + + # Enable/disable service startup at boot if requested + if service.module.params['enabled'] is not None: + # FIXME: ideally this should detect if we need to toggle the enablement state, though + # it's unlikely the changed handler would need to fire in this case so it's a minor thing. + service.service_enable() + result['enabled'] = service.enable + + if module.params['state'] is None: + # Not changing the running state, so bail out now. 
+ result['changed'] = service.changed + module.exit_json(**result) - result['changed']=True + result['state'] = service.state + # Collect service status + if service.pattern: + service.check_ps() else: - # Find service management tools - service.get_service_tools() - - # Enable/disable service startup at boot if requested - if service.module.params['enabled'] is not None: - # FIXME: ideally this should detect if we need to toggle the enablement state, though - # it's unlikely the changed handler would need to fire in this case so it's a minor thing. - service.service_enable() - result['enabled'] = service.enable - - if module.params['state'] is None: - # Not changing the running state, so bail out now. - result['changed'] = service.changed - module.exit_json(**result) - - result['state'] = service.state - - # Collect service status - if service.pattern: - service.check_ps() - else: - service.get_service_status() + service.get_service_status() - # Calculate if request will change service state - service.check_service_changed() + # Calculate if request will change service state + service.check_service_changed() - # Modify service state if necessary - (rc, out, err) = service.modify_service_state() + # Modify service state if necessary + (rc, out, err) = service.modify_service_state() - if rc != 0: - if err and "Job is already running" in err: - # upstart got confused, one such possibility is MySQL on Ubuntu 12.04 - # where status may report it has no start/stop links and we could - # not get accurate status - pass - else: - if err: - module.fail_json(msg=err) - else: - module.fail_json(msg=out) - - result['changed'] = service.changed | service.svc_change - if service.module.params['enabled'] is not None: - result['enabled'] = service.module.params['enabled'] - - if not service.module.params['state']: - status = service.get_service_status() - if status is None: - result['state'] = 'absent' - elif status is False: - result['state'] = 'started' - else: - result['state'] 
= 'stopped' + if rc != 0: + if err and "Job is already running" in err: + # upstart got confused, one such possibility is MySQL on Ubuntu 12.04 + # where status may report it has no start/stop links and we could + # not get accurate status + pass else: - # as we may have just bounced the service the service command may not - # report accurate state at this moment so just show what we ran - if service.module.params['state'] in ['started','restarted','running','reloaded']: - result['state'] = 'started' + if err: + module.fail_json(msg=err) else: - result['state'] = 'stopped' + module.fail_json(msg=out) + + result['changed'] = service.changed | service.svc_change + if service.module.params['enabled'] is not None: + result['enabled'] = service.module.params['enabled'] + + if not service.module.params['state']: + status = service.get_service_status() + if status is None: + result['state'] = 'absent' + elif status is False: + result['state'] = 'started' + else: + result['state'] = 'stopped' + else: + # as we may have just bounced the service the service command may not + # report accurate state at this moment so just show what we ran + if service.module.params['state'] in ['started','restarted','running','reloaded']: + result['state'] = 'started' + else: + result['state'] = 'stopped' module.exit_json(**result) From 4c917d47fc1a0ed4199e0f82efc7e59c387f09d1 Mon Sep 17 00:00:00 2001 From: Jon Hawkesworth Date: Fri, 29 May 2015 02:57:13 +0100 Subject: [PATCH 204/464] Fix win_copy problems described here: https://github.com/ansible/ansible-modules-core/issues/1404 and update documentation. 
--- windows/win_copy.ps1 | 86 +++++++++++++++++++++++++---------------- windows/win_copy.py | 49 +++++++++++++++++++++-- windows/win_file.ps1 | 53 +++++++++++++++---------- windows/win_file.py | 4 +- windows/win_template.py | 22 ++++++----- 5 files changed, 144 insertions(+), 70 deletions(-) diff --git a/windows/win_copy.ps1 b/windows/win_copy.ps1 index 9ffdab85f03..4a83e091c56 100644 --- a/windows/win_copy.ps1 +++ b/windows/win_copy.ps1 @@ -17,68 +17,88 @@ # WANT_JSON # POWERSHELL_COMMON -$params = Parse-Args $args; +$params = Parse-Args $args -$src= Get-Attr $params "src" $FALSE; +$src= Get-Attr $params "src" $FALSE If ($src -eq $FALSE) { - Fail-Json (New-Object psobject) "missing required argument: src"; + Fail-Json (New-Object psobject) "missing required argument: src" } -$dest= Get-Attr $params "dest" $FALSE; +$dest= Get-Attr $params "dest" $FALSE If ($dest -eq $FALSE) { - Fail-Json (New-Object psobject) "missing required argument: dest"; + Fail-Json (New-Object psobject) "missing required argument: dest" } -# seems to be supplied by the calling environment, but -# probably shouldn't be a test for it existing in the params. -# TODO investigate. -$original_basename = Get-Attr $params "original_basename" $FALSE; +$original_basename = Get-Attr $params "original_basename" $FALSE If ($original_basename -eq $FALSE) { - Fail-Json (New-Object psobject) "missing required argument: original_basename "; + Fail-Json (New-Object psobject) "missing required argument: original_basename " } $result = New-Object psobject @{ changed = $FALSE -}; + original_basename = $original_basename +} + +# original_basename gets set if src and dest are dirs +# but includes subdir if the source folder contains sub folders +# e.g. 
you could get subdir/foo.txt + +# detect if doing recursive folder copy and create any non-existent destination sub folder +$parent = Split-Path -Path $original_basename -Parent +if ($parent.length -gt 0) +{ + $dest_folder = Join-Path $dest $parent + New-Item -Force $dest_folder -Type directory +} # if $dest is a dir, append $original_basename so the file gets copied with its intended name. if (Test-Path $dest -PathType Container) { - $dest = Join-Path $dest $original_basename; + $dest = Join-Path $dest $original_basename } -If (Test-Path $dest) -{ - $dest_checksum = Get-FileChecksum ($dest); - $src_checksum = Get-FileChecksum ($src); +$dest_checksum = Get-FileChecksum ($dest) +$src_checksum = Get-FileChecksum ($src) - If (! $src_checksum.CompareTo($dest_checksum)) - { - # New-Item -Force creates subdirs for recursive copies - New-Item -Force $dest -Type file; - Copy-Item -Path $src -Destination $dest -Force; - } - $dest_checksum = Get-FileChecksum ($dest); - If ( $src_checksum.CompareTo($dest_checksum)) +If ($src_checksum.Equals($dest_checksum)) +{ + # if both are "3" then both are folders, ok to copy + If ($src_checksum.Equals("3")) { - $result.changed = $TRUE; + # New-Item -Force creates subdirs for recursive copies + New-Item -Force $dest -Type file + Copy-Item -Path $src -Destination $dest -Force + $result.operation = "folder_copy" } - Else + +} +ElseIf (! 
$src_checksum.Equals($dest_checksum)) +{ + If ($src_checksum.Equals("3")) { - Fail-Json (New-Object psobject) "Failed to place file"; + Fail-Json (New-Object psobject) "If src is a folder, dest must also be a folder" } + # The checksums don't match, there's something to do + Copy-Item -Path $src -Destination $dest -Force + $result.operation = "file_copy" +} + +# verify before we return that the file has changed +$dest_checksum = Get-FileChecksum ($dest) +If ( $src_checksum.Equals($dest_checksum)) +{ + $result.changed = $TRUE } Else { - New-Item -Force $dest -Type file; - Copy-Item -Path $src -Destination $dest; - $result.changed = $TRUE; + Fail-Json (New-Object psobject) "src checksum $src_checksum did not match dest_checksum $dest_checksum Failed to place file $original_basename in $dest" } +# generate return values -$dest_checksum = Get-FileChecksum($dest); -$result.checksum = $dest_checksum; +$info = Get-Item $dest +$result.size = $info.Length -Exit-Json $result; +Exit-Json $result diff --git a/windows/win_copy.py b/windows/win_copy.py index 16b6859488f..d77f37f64d0 100644 --- a/windows/win_copy.py +++ b/windows/win_copy.py @@ -1,7 +1,7 @@ #!/usr/bin/python # -*- coding: utf-8 -*- -# (c) 2012, Michael DeHaan +# (c) 2015, Jon Hawkesworth (@jhawkesworth) # # This file is part of Ansible # @@ -45,16 +45,57 @@ options: this must be a directory too. Use \\ for path separators. required: true default: null -author: Michael DeHaan +author: "Jon Hawkesworth (@jhawkesworth)" notes: - The "win_copy" module recursively copy facility does not scale to lots (>hundreds) of files. Instead, you may find it better to create files locally, perhaps using win_template, and - then use win_get_url to put them in the correct location. + then use win_get_url to fetch them from your managed hosts into the correct location. 
''' EXAMPLES = ''' -# Example from Ansible Playbooks +# Copy a single file - win_copy: src=/srv/myfiles/foo.conf dest=c:\\TEMP\\foo.conf +# Copy the contents of files/temp_files dir into c:\temp\. Includes any sub dirs under files/temp_files +# Note the use of unix style path in the dest. +# This is necessary because \ is yaml escape sequence +- win_copy: src=files/temp_files/ dest=c:/temp/ + +# Copy the files/temp_files dir and any files or sub dirs into c:\temp +# Copies the folder because there is no trailing / on 'files/temp_files' +- win_copy: src=files/temp_files dest=c:/temp/ + +''' +RETURN = ''' +dest: + description: destination file/path + returned: changed + type: string + sample: "c:/temp/" +src: + description: source file used for the copy on the target machine + returned: changed + type: string + sample: "/home/httpd/.ansible/tmp/ansible-tmp-1423796390.97-147729857856000/source" +checksum: + description: checksum of the file after running copy + returned: success + type: string + sample: "6e642bb8dd5c2e027bf21dd923337cbb4214f827" +size: + description: size of the target, after execution + returned: changed (single files only) + type: int + sample: 1220 +operation: + description: whether a single file copy took place or a folder copy + returned: changed (single files only) + type: string + sample: "file_copy" +original_basename: + description: basename of the copied file + returned: changed (single files only) + type: string + sample: "foo.txt" ''' diff --git a/windows/win_file.ps1 b/windows/win_file.ps1 index 62ac81fc1ee..0f3c20ec8e3 100644 --- a/windows/win_file.ps1 +++ b/windows/win_file.ps1 @@ -17,19 +17,19 @@ # WANT_JSON # POWERSHELL_COMMON -$params = Parse-Args $args; +$params = Parse-Args $args # path -$path = Get-Attr $params "path" $FALSE; +$path = Get-Attr $params "path" $FALSE If ($path -eq $FALSE) { - $path = Get-Attr $params "dest" $FALSE; + $path = Get-Attr $params "dest" $FALSE If ($path -eq $FALSE) { - $path = Get-Attr $params "name" 
$FALSE; + $path = Get-Attr $params "name" $FALSE If ($path -eq $FALSE) { - Fail-Json (New-Object psobject) "missing required argument: path"; + Fail-Json (New-Object psobject) "missing required argument: path" } } } @@ -39,17 +39,14 @@ If ($path -eq $FALSE) # state - file, directory, touch, absent # (originally was: state - file, link, directory, hard, touch, absent) -$state = Get-Attr $params "state" "file"; - -#$recurse = Get-Attr $params "recurse" "no"; - -# force - yes, no -# $force = Get-Attr $params "force" "no"; +$state = Get-Attr $params "state" "unspecified" +# if state is not supplied, test the $path to see if it looks like +# a file or a folder and set state to file or folder # result $result = New-Object psobject @{ changed = $FALSE -}; +} If ( $state -eq "touch" ) { @@ -61,45 +58,59 @@ If ( $state -eq "touch" ) { echo $null > $file } - $result.changed = $TRUE; + $result.changed = $TRUE } If (Test-Path $path) { - $fileinfo = Get-Item $path; + $fileinfo = Get-Item $path If ( $state -eq "absent" ) { - Remove-Item -Recurse -Force $fileinfo; - $result.changed = $TRUE; + Remove-Item -Recurse -Force $fileinfo + $result.changed = $TRUE } Else { # Only files have the .Directory attribute. If ( $state -eq "directory" -and $fileinfo.Directory ) { - Fail-Json (New-Object psobject) "path is not a directory"; + Fail-Json (New-Object psobject) "path is not a directory" } # Only files have the .Directory attribute. 
If ( $state -eq "file" -and -not $fileinfo.Directory ) { - Fail-Json (New-Object psobject) "path is not a file"; + Fail-Json (New-Object psobject) "path is not a file" } } } Else +# doesn't yet exist { + If ( $state -eq "unspecified" ) + { + $basename = Split-Path -Path $path -Leaf + If ($basename.length -gt 0) + { + $state = "file" + } + Else + { + $state = "directory" + } + } + If ( $state -eq "directory" ) { New-Item -ItemType directory -Path $path - $result.changed = $TRUE; + $result.changed = $TRUE } If ( $state -eq "file" ) { - Fail-Json (New-Object psobject) "path will not be created"; + Fail-Json (New-Object psobject) "path will not be created" } } -Exit-Json $result; +Exit-Json $result diff --git a/windows/win_file.py b/windows/win_file.py index 6a218216617..4953dd9363b 100644 --- a/windows/win_file.py +++ b/windows/win_file.py @@ -1,7 +1,7 @@ #!/usr/bin/python # -*- coding: utf-8 -*- -# (c) 2012, Michael DeHaan +# (c) 2015, Jon Hawkesworth (@jhawkesworth) # # This file is part of Ansible # @@ -32,7 +32,7 @@ description: notes: - See also M(win_copy), M(win_template), M(copy), M(template), M(assemble) requirements: [ ] -author: Michael DeHaan +author: "Jon Hawkesworth (@jhawkesworth)" options: path: description: diff --git a/windows/win_template.py b/windows/win_template.py index d95d1125fcc..7f981c33daf 100644 --- a/windows/win_template.py +++ b/windows/win_template.py @@ -31,22 +31,24 @@ options: - Location to render the template to on the remote machine. required: true default: null - backup: - description: - - Create a backup file including the timestamp information so you can get - the original file back if you somehow clobbered it incorrectly. - required: false - choices: [ "yes", "no" ] - default: "no" notes: - "templates are loaded with C(trim_blocks=True)." + - By default, windows line endings are not created in the generated file. 
+ - In order to ensure windows line endings are in the generated file, + add the following header as the first line of your template: + "#jinja2: newline_sequence:'\r\n'" + and ensure each line of the template ends with \r\n + - Beware fetching files from windows machines when creating templates + because certain tools, such as Powershell ISE, and regedit's export facility + add a Byte Order Mark as the first character of the file, which can cause tracebacks. + - Use "od -cx" to examine your templates for Byte Order Marks. requirements: [] -author: Michael DeHaan +author: "Jon Hawkesworth (@jhawkesworth)" ''' EXAMPLES = ''' -# Example -- win_template: src=/mytemplates/foo.j2 dest=C:\\temp\\file.conf +# Playbook Example (win_template can only be run inside a playbook) +- win_template: src=/mytemplates/file.conf.j2 dest=C:\\temp\\file.conf ''' From 5e500c6464d07c98cd6b5ae8f97cdf41b6893e01 Mon Sep 17 00:00:00 2001 From: Jon Hawkesworth Date: Wed, 10 Jun 2015 18:53:22 +0100 Subject: [PATCH 205/464] corrected version added which I got wrong while rebasing --- windows/win_copy.py | 2 +- windows/win_template.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/windows/win_copy.py b/windows/win_copy.py index d77f37f64d0..54f035b1851 100644 --- a/windows/win_copy.py +++ b/windows/win_copy.py @@ -24,7 +24,7 @@ import time DOCUMENTATION = ''' --- module: win_copy -version_added: "2.0" +version_added: "1.9.2" short_description: Copies files to remote locations on windows hosts. description: - The M(win_copy) module copies a file on the local box to remote windows locations. diff --git a/windows/win_template.py b/windows/win_template.py index 7f981c33daf..5722065ccb0 100644 --- a/windows/win_template.py +++ b/windows/win_template.py @@ -3,7 +3,7 @@ DOCUMENTATION = ''' --- module: win_template -version_added: "2.0" +version_added: "1.9.2" short_description: Templates a file out to a remote server. 
description: - Templates are processed by the Jinja2 templating language From e5cedc617a0eb3c75773887a01f361607cc7d171 Mon Sep 17 00:00:00 2001 From: David Shrewsbury Date: Wed, 10 Jun 2015 14:02:37 -0400 Subject: [PATCH 206/464] Remove 'result' value This value is pretty much useless, and a holdover from the old module code. Let's remove it. --- cloud/openstack/os_security_group.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/cloud/openstack/os_security_group.py b/cloud/openstack/os_security_group.py index bf316962a39..4aaff2470d6 100644 --- a/cloud/openstack/os_security_group.py +++ b/cloud/openstack/os_security_group.py @@ -118,23 +118,21 @@ def main(): if state == 'present': if not secgroup: secgroup = cloud.create_security_group(name, description) - module.exit_json(changed=True, result='created', - id=secgroup['id']) + module.exit_json(changed=True, id=secgroup['id']) else: if _needs_update(module, secgroup): secgroup = cloud.update_security_group( secgroup['id'], description=description) - module.exit_json(changed=True, result='updated', - id=secgroup['id']) + module.exit_json(changed=True, id=secgroup['id']) else: - module.exit_json(changed=False, result='success') + module.exit_json(changed=False) if state == 'absent': if not secgroup: - module.exit_json(changed=False, result='success') + module.exit_json(changed=False) else: cloud.delete_security_group(secgroup['id']) - module.exit_json(changed=True, result='deleted') + module.exit_json(changed=True) except shade.OpenStackCloudException as e: module.fail_json(msg=e.message) From bf699e55f6e4ac934b404504b16fc9ef643b4efb Mon Sep 17 00:00:00 2001 From: David Shrewsbury Date: Wed, 10 Jun 2015 14:14:01 -0400 Subject: [PATCH 207/464] Remove os_security_group_rule module The rules module will have it's own branch. 
--- cloud/openstack/os_security_group_rule.py | 156 ---------------------- 1 file changed, 156 deletions(-) delete mode 100644 cloud/openstack/os_security_group_rule.py diff --git a/cloud/openstack/os_security_group_rule.py b/cloud/openstack/os_security_group_rule.py deleted file mode 100644 index 849919e6394..00000000000 --- a/cloud/openstack/os_security_group_rule.py +++ /dev/null @@ -1,156 +0,0 @@ -#!/usr/bin/python - -# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. -# Copyright (c) 2013, Benno Joy -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see . 
- -try: - import shade -except ImportError: - print("failed=True msg='shade is required for this module'") - - -DOCUMENTATION = ''' ---- -module: os_security_group_rule -short_description: Add/Delete rule from an existing security group -extends_documentation_fragment: openstack -version_added: "2.0" -description: - - Add or Remove rule from an existing security group -options: - security_group: - description: - - Name of the security group - required: true - protocol: - description: - - IP protocol - choices: ['tcp', 'udp', 'icmp'] - default: tcp - port_range_min: - description: - - Starting port - required: true - port_range_max: - description: - - Ending port - required: true - remote_ip_prefix: - description: - - Source IP address(es) in CIDR notation (exclusive with remote_group) - required: false - remote_group: - description: - - ID of Security group to link (exclusive with remote_ip_prefix) - required: false - state: - description: - - Should the resource be present or absent. - choices: [present, absent] - default: present - -requirements: ["shade"] -''' -# TODO(mordred): add ethertype and direction - -EXAMPLES = ''' -# Create a security group rule -- os_security_group_rule: - cloud: mordred - security_group: foo - protocol: tcp - port_range_min: 80 - port_range_max: 80 - remote_ip_prefix: 0.0.0.0/0 -''' - - -def _security_group_rule(module, nova_client, action='create', **kwargs): - f = getattr(nova_client.security_group_rules, action) - try: - secgroup = f(**kwargs) - except Exception, e: - module.fail_json(msg='Failed to %s security group rule: %s' % - (action, e.message)) - - -def _get_rule_from_group(module, secgroup): - for rule in secgroup.rules: - if (rule['ip_protocol'] == module.params['protocol'] and - rule['from_port'] == module.params['port_range_min'] and - rule['to_port'] == module.params['port_range_max'] and - rule['ip_range']['cidr'] == module.params['remote_ip_prefix']): - return rule - return None - -def main(): - - argument_spec = 
openstack_full_argument_spec( - security_group = dict(required=True), - protocol = dict(default='tcp', choices=['tcp', 'udp', 'icmp']), - port_range_min = dict(required=True), - port_range_max = dict(required=True), - remote_ip_prefix = dict(required=False, default=None), - # TODO(mordred): Make remote_group handle name and id - remote_group = dict(required=False, default=None), - state = dict(default='present', choices=['absent', 'present']), - ) - module_kwargs = openstack_module_kwargs( - mutually_exclusive=[ - ['remote_ip_prefix', 'remote_group'], - ], - ) - module = AnsibleModule(argument_spec, **module_kwargs) - - try: - cloud = shade.openstack_cloud(**module.params) - nova_client = cloud.nova_client - changed = False - - secgroup = cloud.get_security_group(module.params['security_group']) - - if module.params['state'] == 'present': - if not secgroup: - module.fail_json(msg='Could not find security group %s' % - module.params['security_group']) - - if not _get_rule_from_group(module, secgroup): - _security_group_rule(module, nova_client, 'create', - parent_group_id=secgroup.id, - ip_protocol=module.params['protocol'], - from_port=module.params['port_range_min'], - to_port=module.params['port_range_max'], - cidr=module.params['remote_ip'], - group_id=module.params['remote_group'], - changed = True - - - if module.params['state'] == 'absent' and secgroup: - rule = _get_rule_from_group(module, secgroup) - if secgroup and rule: - _security_group_rule(module, nova_client, 'delete', - rule=rule['id']) - changed = True - - module.exit_json(changed=changed, result="success") - - except shade.OpenStackCloudException as e: - module.fail_json(msg=e.message) - -# this is magic, see lib/ansible/module_common.py -from ansible.module_utils.basic import * -from ansible.module_utils.openstack import * -main() From 5b6c0c3697d27eb67de5d1b1960cf6274d9880a6 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Thu, 11 Jun 2015 02:29:28 +0200 Subject: [PATCH 208/464] Add developer 
docs for the OpenStack modules --- cloud/openstack/README.md | 49 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 49 insertions(+) create mode 100644 cloud/openstack/README.md diff --git a/cloud/openstack/README.md b/cloud/openstack/README.md new file mode 100644 index 00000000000..a9b22234add --- /dev/null +++ b/cloud/openstack/README.md @@ -0,0 +1,49 @@ +OpenStack Ansible Modules +========================= + +These are a set of modules for interacting with OpenStack as either an admin +or an end user. If the module does not begin with os_, it's either deprecated +or soon to be. This document serves as developer coding guidelines for +modules intended to be here. + +Naming +------ + +* All modules should start with os_ +* If the module is one that a cloud consumer would expect to use, it should be + named after the logical resource it manages. Thus, os\_server not os\_nova. + The reasoning for this is that there are more than one resource that are + managed by more than one service and which one manages it is a deployment + detail. A good example of this are floating IPs, which can come from either + Nova or Neutron, but which one they come from is immaterial to an end user. +* If the module is one that a cloud admin would expect to use, it should be + be named with the service and the resouce, such as os\_keystone\_domain. +* If the module is one that a cloud admin and a cloud consumer could both use, + the cloud consumer rules apply. + +Interoperability +---------------- + +* It should be assumed that the cloud consumer does not know a bazillion + details about the deployment choices their cloud provider made, and a best + effort should be made to present one sane interface to the ansible user + regardless of deployer insanity. +* All modules should work appropriately against all existing known public + OpenStack clouds. 
+* It should be assumed that a user may have more than one cloud account that + they wish to combine as part of a single ansible managed infrastructure. + +Libraries +--------- + +* All modules should use openstack\_full\_argument\_spec to pick up the + standard input such as auth and ssl support. +* All modules should extends\_documentation\_fragment: openstack to go along + with openstack\_full\_argument\_spec. +* All complex cloud interaction or interoperability code should be housed in + the [shade](http://git.openstack.org/cgit/openstack-infra/shade) library. +* All OpenStack API interactions should happen via shade and not via + OpenStack Client libraries. The OpenStack Client libraries do no have end + users as a primary audience, they are for intra-server communication. The + python-openstacksdk is the future there, and shade will migrate to it when + its ready in a manner that is not noticable to ansible users. From 48422fba85b73f39ad48b26e7e104b0e84064cf2 Mon Sep 17 00:00:00 2001 From: Edward Torbett Date: Thu, 11 Jun 2015 12:39:42 +0100 Subject: [PATCH 209/464] Rather than executing yum once per package, execute yum once for all supplied packages. This is necessary when performing a yum upgrade involving multiple dependent packages installed from RPM, for example when upgrading from PostgreSQL 9.0.11 to 9.0.21 on a Red Hat server. 
--- packaging/os/yum.py | 30 +++++++++++++++++------------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/packaging/os/yum.py b/packaging/os/yum.py index 00f77d68dfc..e81f16bb2c1 100644 --- a/packaging/os/yum.py +++ b/packaging/os/yum.py @@ -485,6 +485,7 @@ def list_stuff(module, conf_file, stuff): def install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): + pkgs = [] res = {} res['results'] = [] res['msg'] = '' @@ -586,7 +587,10 @@ def install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): # the error we're catching here pkg = spec - cmd = yum_basecmd + ['install', pkg] + pkgs.append(pkg) + + if pkgs: + cmd = yum_basecmd + ['install'] + pkgs if module.check_mode: # Remove rpms downloaded for EL5 via url @@ -596,15 +600,15 @@ def install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): module.fail_json(msg="Failure deleting temp directory %s, %s" % (tempdir, e)) module.exit_json(changed=True) - changed = True - rc, out, err = module.run_command(cmd) - # Fail on invalid urls: - if (rc == 1 and '://' in spec and ('No package %s available.' % spec in out or 'Cannot open: %s. Skipping.' % spec in err)): - err = 'Package at %s could not be installed' % spec - module.fail_json(changed=False,msg=err,rc=1) - elif (rc != 0 and 'Nothing to do' in err) or 'Nothing to do' in out: + if (rc == 1): + for spec in items: + # Fail on invalid urls: + if ('://' in spec and ('No package %s available.' % spec in out or 'Cannot open: %s. Skipping.' % spec in err)): + err = 'Package at %s could not be installed' % spec + module.fail_json(changed=False,msg=err,rc=1) + if (rc != 0 and 'Nothing to do' in err) or 'Nothing to do' in out: # avoid failing in the 'Nothing To Do' case # this may happen with an URL spec. 
# for an already installed group, @@ -614,16 +618,16 @@ def install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): out = '%s: Nothing to do' % spec changed = False - res['rc'] += rc + res['rc'] = rc res['results'].append(out) res['msg'] += err # FIXME - if we did an install - go and check the rpmdb to see if it actually installed - # look for the pkg in rpmdb - # look for the pkg via obsoletes + # look for each pkg in rpmdb + # look for each pkg via obsoletes - # accumulate any changes - res['changed'] |= changed + # Record change + res['changed'] = True # Remove rpms downloaded for EL5 via url try: From 51bc602f42d5c772e4de1e9202f3fe5e74de2d88 Mon Sep 17 00:00:00 2001 From: Bruce Pennypacker Date: Thu, 11 Jun 2015 13:43:15 +0000 Subject: [PATCH 210/464] Added delobj command to delete an object within a bucket --- cloud/amazon/s3.py | 27 +++++++++++++++++++++++---- 1 file changed, 23 insertions(+), 4 deletions(-) diff --git a/cloud/amazon/s3.py b/cloud/amazon/s3.py index 6f8e447397d..9a9b626718f 100644 --- a/cloud/amazon/s3.py +++ b/cloud/amazon/s3.py @@ -64,7 +64,7 @@ options: version_added: "1.6" mode: description: - - Switches the module behaviour between put (upload), get (download), geturl (return download url (Ansible 1.3+), getstr (download object as string (1.3+)), create (bucket) and delete (bucket). + - Switches the module behaviour between put (upload), get (download), geturl (return download url (Ansible 1.3+), getstr (download object as string (1.3+)), create (bucket), delete (bucket), and delobj (delete object). 
required: true default: null aliases: [] @@ -126,8 +126,11 @@ EXAMPLES = ''' # Delete a bucket and all contents - s3: bucket=mybucket mode=delete -# GET an object but dont download if the file checksums match +# GET an object but dont download if the file checksums match - s3: bucket=mybucket object=/my/desired/key.txt dest=/usr/local/myfile.txt mode=get overwrite=different + +# Delete an object from a bucket +- s3: bucket=mybucket object=/my/desired/key.txt mode=delobj ''' import os @@ -305,7 +308,7 @@ def main(): encrypt = dict(default=True, type='bool'), expiry = dict(default=600, aliases=['expiration']), metadata = dict(type='dict'), - mode = dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr'], required=True), + mode = dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj'], required=True), object = dict(), overwrite = dict(aliases=['force'], default='always'), retries = dict(aliases=['retry'], type='int', default=0), @@ -477,7 +480,23 @@ def main(): if bucketrtn is True and pathrtn is True and keyrtn is False: upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt) - # Support for deleting an object if we have both params. + # Delete an object from a bucket, not the entire bucket + if mode == 'delobj': + if obj is None: + module.fail_json(msg="object parameter is required", failed=True); + if bucket: + bucketrtn = bucket_check(module, s3, bucket) + if bucketrtn is True: + deletertn = delete_key(module, s3, bucket, obj) + if deletertn is True: + module.exit_json(msg="Object %s deleted from bucket %s." 
% (obj, bucket), changed=True) + else: + module.fail_json(msg="Bucket does not exist.", changed=False) + else: + module.fail_json(msg="Bucket parameter is required.", failed=True) + + + # Delete an entire bucket, including all objects in the bucket if mode == 'delete': if bucket: bucketrtn = bucket_check(module, s3, bucket) From 471824b451581ccc22f511fd482cc38b0372b17f Mon Sep 17 00:00:00 2001 From: Edward Torbett Date: Thu, 11 Jun 2015 15:13:28 +0100 Subject: [PATCH 211/464] Comments by @abadger --- packaging/os/yum.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/packaging/os/yum.py b/packaging/os/yum.py index e81f16bb2c1..858ed3af9a4 100644 --- a/packaging/os/yum.py +++ b/packaging/os/yum.py @@ -600,6 +600,8 @@ def install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): module.fail_json(msg="Failure deleting temp directory %s, %s" % (tempdir, e)) module.exit_json(changed=True) + changed = True + rc, out, err = module.run_command(cmd) if (rc == 1): @@ -627,7 +629,7 @@ def install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): # look for each pkg via obsoletes # Record change - res['changed'] = True + res['changed'] = changed # Remove rpms downloaded for EL5 via url try: From fc36506b9eed111e35e8858ca5d5d3ed2fb9f9d2 Mon Sep 17 00:00:00 2001 From: Edward Torbett Date: Thu, 11 Jun 2015 15:21:30 +0100 Subject: [PATCH 212/464] Added multi package operation to remove as suggested by @abadger. Adding to latest is a little more complex due to '*' support. 
--- packaging/os/yum.py | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/packaging/os/yum.py b/packaging/os/yum.py index 858ed3af9a4..8063904e312 100644 --- a/packaging/os/yum.py +++ b/packaging/os/yum.py @@ -642,6 +642,7 @@ def install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): def remove(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): + pkgs = [] res = {} res['results'] = [] res['msg'] = '' @@ -658,17 +659,20 @@ def remove(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): res['results'].append('%s is not installed' % pkg) continue + pkgs.append(pkg) + + if pkgs: # run an actual yum transaction - cmd = yum_basecmd + ["remove", pkg] + cmd = yum_basecmd + ["remove"] + pkg if module.check_mode: module.exit_json(changed=True) rc, out, err = module.run_command(cmd) - res['rc'] += rc + res['rc'] = rc res['results'].append(out) - res['msg'] += err + res['msg'] = err # compile the results into one batch. If anything is changed # then mark changed @@ -677,12 +681,13 @@ def remove(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): # at this point we should check to see if the pkg is no longer present - if not is_group: # we can't sensibly check for a group being uninstalled reliably - # look to see if the pkg shows up from is_installed. If it doesn't - if not is_installed(module, repoq, pkg, conf_file, en_repos=en_repos, dis_repos=dis_repos): - res['changed'] = True - else: - module.fail_json(**res) + for pkg in pkgs: + if not pkg.startswith('@'): # we can't sensibly check for a group being uninstalled reliably + # look to see if the pkg shows up from is_installed. 
If it doesn't + if not is_installed(module, repoq, pkg, conf_file, en_repos=en_repos, dis_repos=dis_repos): + res['changed'] = True + else: + module.fail_json(**res) if rc != 0: module.fail_json(**res) From 2c8b765cf06f914bf7957790fd2b1ac4b478b964 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 11 Jun 2015 11:35:49 -0400 Subject: [PATCH 213/464] minor doc fixes on win_template --- windows/win_template.py | 21 ++++++++------------- 1 file changed, 8 insertions(+), 13 deletions(-) diff --git a/windows/win_template.py b/windows/win_template.py index 5722065ccb0..c384ad7775f 100644 --- a/windows/win_template.py +++ b/windows/win_template.py @@ -10,12 +10,12 @@ description: (U(http://jinja.pocoo.org/docs/)) - documentation on the template formatting can be found in the Template Designer Documentation (U(http://jinja.pocoo.org/docs/templates/)). - - "Six additional variables can be used in templates: C(ansible_managed) + - "Six additional variables can be used in templates: C(ansible_managed) (configurable via the C(defaults) section of C(ansible.cfg)) contains a string which can be used to describe the template name, host, modification time of the - template file and the owner uid, C(template_host) contains the node name of + template file and the owner uid, C(template_host) contains the node name of the template's machine, C(template_uid) the owner, C(template_path) the - absolute path of the template, C(template_fullpath) is the absolute path of the + absolute path of the template, C(template_fullpath) is the absolute path of the template, and C(template_run_date) is the date that the template was rendered. Note that including a string that uses a date in the template will result in the template being marked 'changed' each time." @@ -24,31 +24,26 @@ options: description: - Path of a Jinja2 formatted template on the local server. This can be a relative or absolute path. 
required: true - default: null - aliases: [] dest: description: - Location to render the template to on the remote machine. required: true - default: null notes: - "templates are loaded with C(trim_blocks=True)." - By default, windows line endings are not created in the generated file. - - In order to ensure windows line endings are in the generated file, - add the following header as the first line of your template: - "#jinja2: newline_sequence:'\r\n'" - and ensure each line of the template ends with \r\n + - "In order to ensure windows line endings are in the generated file, add the following header + as the first line of your template: #jinja2: newline_sequence:'\r\n' and ensure each line + of the template ends with \r\n" - Beware fetching files from windows machines when creating templates because certain tools, such as Powershell ISE, and regedit's export facility - add a Byte Order Mark as the first character of the file, which can cause tracebacks. + add a Byte Order Mark as the first character of the file, which can cause tracebacks. - Use "od -cx" to examine your templates for Byte Order Marks. 
-requirements: [] author: "Jon Hawkesworth (@jhawkesworth)" ''' EXAMPLES = ''' # Playbook Example (win_template can only be run inside a playbook) -- win_template: src=/mytemplates/file.conf.j2 dest=C:\\temp\\file.conf +- win_template: src=/mytemplates/file.conf.j2 dest=C:\\temp\\file.conf ''' From d6696aca9f3c23722ba0b14c20385f6c1d604039 Mon Sep 17 00:00:00 2001 From: Jon Hawkesworth Date: Thu, 11 Jun 2015 18:44:31 +0100 Subject: [PATCH 214/464] remove include of files options only relevant to unix from win_file.py documentation --- windows/win_file.py | 1 - 1 file changed, 1 deletion(-) diff --git a/windows/win_file.py b/windows/win_file.py index 6a218216617..5f606112609 100644 --- a/windows/win_file.py +++ b/windows/win_file.py @@ -24,7 +24,6 @@ DOCUMENTATION = ''' module: win_file version_added: "1.8" short_description: Creates, touches or removes files or directories. -extends_documentation_fragment: files description: - Creates (empty) files, updates file modification stamps of existing files, and can create or remove directories. 
From 1e4bd62af5c9669c2a5e54eac6b389d54d4d8410 Mon Sep 17 00:00:00 2001 From: Andrea Mandolo Date: Mon, 1 Dec 2014 17:06:20 +0100 Subject: [PATCH 215/464] Add "block_device_mapping" parameter on EC2_AMI Amazon module - ugraded --- cloud/amazon/ec2_ami.py | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/cloud/amazon/ec2_ami.py b/cloud/amazon/ec2_ami.py index 98c41357212..23d75171e06 100644 --- a/cloud/amazon/ec2_ami.py +++ b/cloud/amazon/ec2_ami.py @@ -136,6 +136,7 @@ import time try: import boto import boto.ec2 + from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping HAS_BOTO = True except ImportError: HAS_BOTO = False @@ -155,6 +156,7 @@ def create_image(module, ec2): wait_timeout = int(module.params.get('wait_timeout')) description = module.params.get('description') no_reboot = module.params.get('no_reboot') + device_mapping = module.params.get('device_mapping') tags = module.params.get('tags') try: @@ -163,6 +165,17 @@ def create_image(module, ec2): 'description': description, 'no_reboot': no_reboot} + if device_mapping: + bdm = BlockDeviceMapping() + for device in device_mapping: + if 'device_name' not in device: + module.fail_json(msg = 'Device name must be set for volume') + device_name = device['device_name'] + del device['device_name'] + bd = BlockDeviceType(**device) + bdm[device_name] = bd + params['block_device_mapping'] = bdm + image_id = ec2.create_image(**params) except boto.exception.BotoServerError, e: if e.error_code == 'InvalidAMIName.Duplicate': @@ -257,8 +270,8 @@ def main(): description = dict(default=""), no_reboot = dict(default=False, type="bool"), state = dict(default='present'), - tags = dict(type='dict'), - + device_mapping = dict(type='list'), + tags = dict(type='dict') ) ) module = AnsibleModule(argument_spec=argument_spec) @@ -291,4 +304,3 @@ from ansible.module_utils.basic import * from ansible.module_utils.ec2 import * main() - From 0b0d97299868e26134f0c5a240419a1436528cb6 Mon 
Sep 17 00:00:00 2001 From: Andrea Mandolo Date: Mon, 1 Dec 2014 17:51:48 +0100 Subject: [PATCH 216/464] Add "block_device_mapping" parameter on EC2_AMI Amazon module (DOCUMENTATION) - upgraded --- cloud/amazon/ec2_ami.py | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/cloud/amazon/ec2_ami.py b/cloud/amazon/ec2_ami.py index 23d75171e06..979dd3b6a25 100644 --- a/cloud/amazon/ec2_ami.py +++ b/cloud/amazon/ec2_ami.py @@ -69,6 +69,12 @@ options: - Image ID to be deregistered. required: false default: null + device_mapping: + version_added: "1.9" + description: + - An optional list of devices with custom configurations (same block-device-mapping parameters) + required: false + default: null delete_snapshot: description: - Whether or not to delete an AMI while deregistering it. @@ -110,6 +116,23 @@ EXAMPLES = ''' name: newtest register: instance +# AMI Creation, with a custom root-device size and another EBS attached +- ec2_ami + aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx + aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + instance_id: i-xxxxxx + name: newtest + device_mapping: + - device_name: /dev/sda1 + size: XXX + delete_on_termination: true + volume_type: gp2 + - device_name: /dev/sdb + size: YYY + delete_on_termination: false + volume_type: gp2 + register: instance + # Deregister/Delete AMI - ec2_ami: aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx From 1fe79801f8699bb0679f61719eb85bf873976257 Mon Sep 17 00:00:00 2001 From: Edward Torbett Date: Fri, 12 Jun 2015 11:18:14 +0100 Subject: [PATCH 217/464] Corrected pkg to pkgs as noted by @abadger --- packaging/os/yum.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/os/yum.py b/packaging/os/yum.py index 8063904e312..534440241b2 100644 --- a/packaging/os/yum.py +++ b/packaging/os/yum.py @@ -663,7 +663,7 @@ def remove(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): if pkgs: # run an actual yum transaction - cmd = yum_basecmd + ["remove"] + pkg + 
cmd = yum_basecmd + ["remove"] + pkgs if module.check_mode: module.exit_json(changed=True) From 558f2ace1f3448dd50c17d38de9a50f5850c050a Mon Sep 17 00:00:00 2001 From: Ed Hein Date: Fri, 12 Jun 2015 12:36:52 +0200 Subject: [PATCH 218/464] Fix computation of port bindings. Port bindings configuration can be a list if several host ports are bound to the same guest port. --- cloud/docker/docker.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index cb6d3dae075..b04b6ee335a 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -1041,15 +1041,14 @@ class DockerManager(object): for container_port, config in self.port_bindings.iteritems(): if isinstance(container_port, int): container_port = "{0}/tcp".format(container_port) - bind = {} if len(config) == 1: - bind['HostIp'] = "0.0.0.0" - bind['HostPort'] = "" + expected_bound_ports[container_port] = [{'HostIp': "0.0.0.0", 'HostPort': ""}] + elif isinstance(config[0], tuple): + expected_bound_ports[container_port] = [] + for hostip, hostport in config: + expected_bound_ports[container_port].append({ 'HostIp': hostip, 'HostPort': str(hostport)}) else: - bind['HostIp'] = config[0] - bind['HostPort'] = str(config[1]) - - expected_bound_ports[container_port] = [bind] + expected_bound_ports[container_port] = [{'HostIp': config[0], 'HostPort': str(config[1])}] actual_bound_ports = container['HostConfig']['PortBindings'] or {} From bb8e9563cf4dfcc11694733900ac1f525b270716 Mon Sep 17 00:00:00 2001 From: Soenke Ruempler Date: Mon, 9 Mar 2015 17:10:01 +0100 Subject: [PATCH 219/464] Add support for SNS notification ARNs in CloudFormation --- cloud/amazon/cloudformation.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/cloud/amazon/cloudformation.py b/cloud/amazon/cloudformation.py index 1718ef142b1..b774cbfefbb 100644 --- a/cloud/amazon/cloudformation.py +++ b/cloud/amazon/cloudformation.py @@ -54,6 +54,12 @@ options: 
required: false default: null aliases: [] + notification_arns: + description: + - The Simple Notification Service (SNS) topic ARNs to publish stack related events. + required: false + default: null + version_added: "2.0" stack_policy: description: - the path of the cloudformation stack policy @@ -228,6 +234,7 @@ def main(): template_parameters=dict(required=False, type='dict', default={}), state=dict(default='present', choices=['present', 'absent']), template=dict(default=None, required=False), + notification_arns=dict(default=None, required=False), stack_policy=dict(default=None, required=False), disable_rollback=dict(default=False, type='bool'), template_url=dict(default=None, required=False), @@ -264,6 +271,8 @@ def main(): else: template_body = json.dumps(yaml.load(template_body), indent=2) + notification_arns = module.params['notification_arns'] + if module.params['stack_policy'] is not None: stack_policy_body = open(module.params['stack_policy'], 'r').read() else: @@ -304,6 +313,7 @@ def main(): try: cfn.create_stack(stack_name, parameters=template_parameters_tup, template_body=template_body, + notification_arns=notification_arns, stack_policy_body=stack_policy_body, template_url=template_url, disable_rollback=disable_rollback, @@ -326,6 +336,7 @@ def main(): try: cfn.update_stack(stack_name, parameters=template_parameters_tup, template_body=template_body, + notification_arns=notification_arns, stack_policy_body=stack_policy_body, disable_rollback=disable_rollback, template_url=template_url, From 7f12130d7a3b7f2aae76ac37f45f192cd0d1d6b0 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 12 Jun 2015 10:39:47 -0400 Subject: [PATCH 220/464] fixed version added --- cloud/amazon/ec2_ami.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_ami.py b/cloud/amazon/ec2_ami.py index 979dd3b6a25..b9bb1bff72d 100644 --- a/cloud/amazon/ec2_ami.py +++ b/cloud/amazon/ec2_ami.py @@ -70,7 +70,7 @@ options: required: false default: null 
device_mapping: - version_added: "1.9" + version_added: "2.0" description: - An optional list of devices with custom configurations (same block-device-mapping parameters) required: false From bcbf8c555678214e905448692b00a1b7dbd01599 Mon Sep 17 00:00:00 2001 From: Edward Torbett Date: Fri, 12 Jun 2015 18:06:25 +0100 Subject: [PATCH 221/464] Renamed previous pkgs variable to installed_pkgs as spotted by @strahinja --- packaging/os/yum.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/packaging/os/yum.py b/packaging/os/yum.py index 534440241b2..9490a15b15d 100644 --- a/packaging/os/yum.py +++ b/packaging/os/yum.py @@ -541,9 +541,9 @@ def install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): # short circuit all the bs - and search for it as a pkg in is_installed # if you find it then we're done if not set(['*','?']).intersection(set(spec)): - pkgs = is_installed(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos, is_pkg=True) - if pkgs: - res['results'].append('%s providing %s is already installed' % (pkgs[0], spec)) + installed_pkgs = is_installed(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos, is_pkg=True) + if installed_pkgs: + res['results'].append('%s providing %s is already installed' % (installed_pkgs[0], spec)) continue # look up what pkgs provide this From 9c88f9109276ae0ec11144c6c2fe25d8562fa6f1 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Sat, 13 Jun 2015 17:08:32 -0700 Subject: [PATCH 222/464] Yum API is faster than calling out to repoquery. Looking through the commit logs it looks like we weren't previously doing that because of commit 14479e6adca38e2691e7184bf9f1f713ef265ec7 The message there is that Yum API prints an error message if the rhn-plugin is in use and no rhn-certificate is available. So instead of using repoquery in preference always here we use repoquery in preference if the rhn-plugin is enabled. 
--- packaging/os/yum.py | 40 +++++++++++++++++++++------------------- 1 file changed, 21 insertions(+), 19 deletions(-) diff --git a/packaging/os/yum.py b/packaging/os/yum.py index 9490a15b15d..61fc9f53333 100644 --- a/packaging/os/yum.py +++ b/packaging/os/yum.py @@ -152,12 +152,6 @@ EXAMPLES = ''' def_qf = "%{name}-%{version}-%{release}.%{arch}" -repoquery='/usr/bin/repoquery' -if not os.path.exists(repoquery): - repoquery = None - -yumbin='/usr/bin/yum' - def log(msg): syslog.openlog('ansible-yum', 0, syslog.LOG_USER) syslog.syslog(syslog.LOG_NOTICE, msg) @@ -177,10 +171,6 @@ def install_yum_utils(module): yum_path = module.get_bin_path('yum') if yum_path: rc, so, se = module.run_command('%s -y install yum-utils' % yum_path) - if rc == 0: - this_path = module.get_bin_path('repoquery') - global repoquery - repoquery = this_path def po_to_nevra(po): @@ -465,10 +455,10 @@ def repolist(module, repoq, qf="%{repoid}"): ret = set([ p for p in out.split('\n') if p.strip() ]) return ret -def list_stuff(module, conf_file, stuff): +def list_stuff(module, repoquerybin, conf_file, stuff): qf = "%{name}|%{epoch}|%{version}|%{release}|%{arch}|%{repoid}" - repoq = [repoquery, '--show-duplicates', '--plugins', '--quiet'] + repoq = [repoquerybin, '--show-duplicates', '--plugins', '--quiet'] if conf_file and os.path.exists(conf_file): repoq += ['-c', conf_file] @@ -779,13 +769,24 @@ def latest(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): def ensure(module, state, pkgs, conf_file, enablerepo, disablerepo, disable_gpg_check, exclude): + yumbin = module.get_bin_path('yum') # need debug level 2 to get 'Nothing to do' for groupinstall. yum_basecmd = [yumbin, '-d', '2', '-y'] - if not repoquery: - repoq = None - else: - repoq = [repoquery, '--show-duplicates', '--plugins', '--quiet'] + # If rhn-plugin is installed and no rhn-certificate is available on the + # system then users will see an error message using the yum API. Use + # repoquery in those cases. 
+ + my = yum_base(conf_file) + # A sideeffect of accessing conf is that the configuration is + # loaded and plugins are discovered + my.conf + + repoq = None + if 'rhnplugin' in my.plugins._plugins: + repoquery = module.get_bin_path('repoquery', required=False) + if repoquery: + repoq = [repoquery, '--show-duplicates', '--plugins', '--quiet'] if conf_file and os.path.exists(conf_file): yum_basecmd += ['-c', conf_file] @@ -806,7 +807,7 @@ def ensure(module, state, pkgs, conf_file, enablerepo, disablerepo, if exclude: e_cmd = ['--exclude=%s' % exclude] yum_basecmd.extend(e_cmd) - + if state in ['installed', 'present', 'latest']: if module.params.get('update_cache'): @@ -882,10 +883,11 @@ def main(): if params['install_repoquery'] and not repoquery and not module.check_mode: install_yum_utils(module) + repoquerybin = module.get_bin_path('repoquery', required=False) if params['list']: - if not repoquery: + if not repoquerybin: module.fail_json(msg="repoquery is required to use list= with this module. Please install the yum-utils package.") - results = dict(results=list_stuff(module, params['conf_file'], params['list'])) + results = dict(results=list_stuff(module, repoquerybin, params['conf_file'], params['list'])) module.exit_json(**results) else: From 47113727eed38741c9a0e79324e4495dc20db161 Mon Sep 17 00:00:00 2001 From: Julia Kreger Date: Fri, 1 May 2015 09:51:59 -0400 Subject: [PATCH 223/464] Updating os_ironic_node module Updating the os_ironic_node module to the most recent version including support for power and maintenance states. --- cloud/openstack/os_ironic_node.py | 241 ++++++++++++++++++++++++++---- 1 file changed, 209 insertions(+), 32 deletions(-) diff --git a/cloud/openstack/os_ironic_node.py b/cloud/openstack/os_ironic_node.py index 386a5f9fe84..a50d6897e5c 100644 --- a/cloud/openstack/os_ironic_node.py +++ b/cloud/openstack/os_ironic_node.py @@ -1,7 +1,7 @@ #!/usr/bin/python # coding: utf-8 -*- -# (c) 2014, Hewlett-Packard Development Company, L.P. 
+# (c) 2015, Hewlett-Packard Development Company, L.P. # # This module is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -25,7 +25,6 @@ except ImportError: DOCUMENTATION = ''' --- module: os_ironic_node -version_added: "1.10" short_description: Activate/Deactivate Bare Metal Resources from OpenStack extends_documentation_fragment: openstack description: @@ -36,6 +35,13 @@ options: - Indicates desired state of the resource choices: ['present', 'absent'] default: present + deploy: + description: + - Indicates if the resource should be deployed. Allows for deployment + logic to be disengaged and control of the node power or maintenance + state to be changed. + choices: ['true', 'false'] + default: true uuid: description: - globally unique identifier (UUID) to be given to the resource. @@ -44,7 +50,7 @@ options: ironic_url: description: - If noauth mode is utilized, this is required to be set to the - endpoint URL for the Ironic API. Use with "auth" and "auth_plugin" + endpoint URL for the Ironic API. Use with "auth" and "auth_type" settings set to None. required: false default: None @@ -57,7 +63,8 @@ options: instance_info: description: - Definition of the instance information which is used to deploy - the node. + the node. This information is only required when an instance is + set to present. image_source: description: - An HTTP(S) URL where the image can be retrieved from. @@ -67,6 +74,26 @@ options: image_disk_format: description: - The type of image that has been requested to be deployed. + power: + description: + - A setting to allow power state to be asserted allowing nodes + that are not yet deployed to be powered on, and nodes that + are deployed to be powered off. + choices: ['present', 'absent'] + default: present + maintenance: + description: + - A setting to allow the direct control if a node is in + maintenance mode. 
+ required: false + default: false + maintenance_reason: + description: + - A string expression regarding the reason a node is in a + maintenance mode. + required: false + default: None + requirements: ["shade"] ''' @@ -76,6 +103,9 @@ os_ironic_node: cloud: "openstack" uuid: "d44666e1-35b3-4f6b-acb0-88ab7052da69" state: present + power: present + deploy: True + maintenance: False config_drive: "http://192.168.1.1/host-configdrive.iso" instance_info: image_source: "http://192.168.1.1/deploy_image.img" @@ -85,6 +115,16 @@ os_ironic_node: ''' +def _choose_id_value(module): + if module.params['uuid']: + return module.params['uuid'] + if module.params['name']: + return module.params['name'] + return None + + +# TODO(TheJulia): Change this over to use the machine patch method +# in shade once it is available. def _prepare_instance_info_patch(instance_info): patch = [] patch.append({ @@ -95,56 +135,193 @@ def _prepare_instance_info_patch(instance_info): return patch +def _is_true(value): + true_values = [True, 'yes', 'Yes', 'True', 'true', 'present', 'on'] + if value in true_values: + return True + return False + + +def _is_false(value): + false_values = [False, None, 'no', 'No', 'False', 'false', 'absent', 'off'] + if value in false_values: + return True + return False + + +def _check_set_maintenance(module, cloud, node): + if _is_true(module.params['maintenance']): + if _is_false(node['maintenance']): + cloud.set_machine_maintenance_state( + node['uuid'], + True, + reason=module.params['maintenance_reason']) + module.exit_json(changed=True, msg="Node has been set into " + "maintenance mode") + else: + # User has requested maintenance state, node is already in the + # desired state, checking to see if the reason has changed. 
+ if (str(node['maintenance_reason']) not in + str(module.params['maintenance_reason'])): + cloud.set_machine_maintenance_state( + node['uuid'], + True, + reason=module.params['maintenance_reason']) + module.exit_json(changed=True, msg="Node maintenance reason " + "updated, cannot take any " + "additional action.") + elif _is_false(module.params['maintenance']): + if node['maintenance'] is True: + cloud.remove_machine_from_maintenance(node['uuid']) + return True + else: + module.fail_json(msg="maintenance parameter was set but a valid " + "the value was not recognized.") + return False + + +def _check_set_power_state(module, cloud, node): + if 'power on' in str(node['power_state']): + if _is_false(module.params['power']): + # User has requested the node be powered off. + cloud.set_machine_power_off(node['uuid']) + module.exit_json(changed=True, msg="Power requested off") + if 'power off' in str(node['power_state']): + if (_is_false(module.params['power']) and + _is_false(module.params['state'])): + return False + if (_is_false(module.params['power']) and + _is_false(module.params['state'])): + module.exit_json( + changed=False, + msg="Power for node is %s, node must be reactivated " + "OR set to state absent" + ) + # In the event the power has been toggled on and + # deployment has been requested, we need to skip this + # step. + if (_is_true(module.params['power']) and + _is_false(module.params['deploy'])): + # Node is powered down when it is not awaiting to be provisioned + cloud.set_machine_power_on(node['uuid']) + return True + # Default False if no action has been taken. 
+ return False + + def main(): argument_spec = openstack_full_argument_spec( - uuid=dict(required=True), - instance_info=dict(type='dict', required=True), + uuid=dict(required=False), + name=dict(required=False), + instance_info=dict(type='dict', required=False), config_drive=dict(required=False), ironic_url=dict(required=False), + state=dict(required=False, default='present'), + maintenance=dict(required=False), + maintenance_reason=dict(required=False), + power=dict(required=False, default='present'), + deploy=dict(required=False, default=True), ) module_kwargs = openstack_module_kwargs() module = AnsibleModule(argument_spec, **module_kwargs) if not HAS_SHADE: module.fail_json(msg='shade is required for this module') - if (module.params['auth_plugin'] == 'None' and + if (module.params['auth_type'] in [None, 'None'] and module.params['ironic_url'] is None): module.fail_json(msg="Authentication appears disabled, Please " "define an ironic_url parameter") - if module.params['ironic_url'] and module.params['auth_plugin'] == 'None': - module.params['auth'] = dict(endpoint=module.params['ironic_url']) + if (module.params['ironic_url'] and + module.params['auth_type'] in [None, 'None']): + module.params['auth'] = dict( + endpoint=module.params['ironic_url'] + ) + + node_id = _choose_id_value(module) + if not node_id: + module.fail_json(msg="A uuid or name value must be defined " + "to use this module.") try: cloud = shade.operator_cloud(**module.params) - server = cloud.get_machine_by_uuid(module.params['uuid']) + node = cloud.get_machine(node_id) + + if node is None: + module.fail_json(msg="node not found") + + uuid = node['uuid'] instance_info = module.params['instance_info'] - uuid = module.params['uuid'] - if module.params['state'] == 'present': - if server is None: - module.fail_json(msg="node not found") - else: - # TODO: compare properties here and update if necessary - # ... but the interface for that is terrible! 
- if server.provision_state is "active": - module.exit_json( - changed=False, - result="Node already in an active state" - ) - - patch = _prepare_instance_info_patch(instance_info) - cloud.set_node_instance_info(uuid, patch) - cloud.validate_node(uuid) - cloud.activate_node(uuid, module.params['config_drive']) - # TODO: Add more error checking and a wait option. - module.exit_json(changed=False, result="node activated") - - if module.params['state'] == 'absent': - if server.provision_state is not "deleted": + changed = False + + # User has reqeusted desired state to be in maintenance state. + if module.params['state'] is 'maintenance': + module.params['maintenance'] = True + + if node['provision_state'] in [ + 'cleaning', + 'deleting', + 'wait call-back']: + module.fail_json(msg="Node is in %s state, cannot act upon the " + "request as the node is in a transition " + "state" % node['provision_state']) + # TODO(TheJulia) This is in-development code, that requires + # code in the shade library that is still in development. + if _check_set_maintenance(module, cloud, node): + if node['provision_state'] in 'active': + module.exit_json(changed=True, + result="Maintenance state changed") + changed = True + node = cloud.get_machine(node_id) + + if _check_set_power_state(module, cloud, node): + changed = True + node = cloud.get_machine(node_id) + + if _is_true(module.params['state']): + if _is_false(module.params['deploy']): + module.exit_json( + changed=changed, + result="User request has explicitly disabled " + "deployment logic" + ) + + if 'active' in node['provision_state']: + module.exit_json( + changed=changed, + result="Node already in an active state." + ) + + if instance_info is None: + module.fail_json( + changed=changed, + msg="When setting an instance to present, " + "instance_info is a required variable.") + + # TODO(TheJulia): Update instance info, however info is + # deployment specific. 
Perhaps consider adding rebuild + # support, although there is a known desire to remove + # rebuild support from Ironic at some point in the future. + patch = _prepare_instance_info_patch(instance_info) + cloud.set_node_instance_info(uuid, patch) + cloud.validate_node(uuid) + cloud.activate_node(uuid, module.params['config_drive']) + # TODO(TheJulia): Add more error checking and a wait option. + # We will need to loop, or just add the logic to shade, + # although this could be a very long running process as + # baremetal deployments are not a "quick" task. + module.exit_json(changed=changed, result="node activated") + + elif _is_false(module.params['state']): + if node['provision_state'] not in "deleted": cloud.purge_node_instance_info(uuid) cloud.deactivate_node(uuid) module.exit_json(changed=True, result="deleted") else: module.exit_json(changed=False, result="node not found") + else: + module.fail_json(msg="State must be present, absent, " + "maintenance, off") + except shade.OpenStackCloudException as e: module.fail_json(msg=e.message) From b1160ade9aa52121851562001d162d3faf7e40f8 Mon Sep 17 00:00:00 2001 From: Kamil Madac Date: Sun, 14 Jun 2015 19:48:00 +0200 Subject: [PATCH 224/464] Better error handling in supervisorctl module. If execution of supervisorctl was not successful (exit code > 0), module silently supress this error and returns changed = false, which turns to OK task state. This is very confusing, when supervisorctl needs authentication, and credentials are not specified in module or are incorrect, services are not restarted/started/stopped without raising an error. 
--- web_infrastructure/supervisorctl.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/web_infrastructure/supervisorctl.py b/web_infrastructure/supervisorctl.py index f75992b9a6a..ef86eec26a7 100644 --- a/web_infrastructure/supervisorctl.py +++ b/web_infrastructure/supervisorctl.py @@ -183,14 +183,14 @@ def main(): if module.check_mode: module.exit_json(changed=True) for process_name in to_take_action_on: - rc, out, err = run_supervisorctl(action, process_name) + rc, out, err = run_supervisorctl(action, process_name, check_rc=True) if '%s: %s' % (process_name, expected_result) not in out: module.fail_json(msg=out) module.exit_json(changed=True, name=name, state=state, affected=to_take_action_on) if state == 'restarted': - rc, out, err = run_supervisorctl('update') + rc, out, err = run_supervisorctl('update', check_rc=True) processes = get_matched_processes() take_action_on_processes(processes, lambda s: True, 'restart', 'started') From 9b7de40922efddedb6e084a752a67fbce57ea891 Mon Sep 17 00:00:00 2001 From: Robert Estelle Date: Thu, 13 Nov 2014 21:22:51 -0500 Subject: [PATCH 225/464] EC2 Security Group - Validate parameters. 
--- cloud/amazon/ec2_group.py | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_group.py b/cloud/amazon/ec2_group.py index 04598c74ad4..54e92e9784c 100644 --- a/cloud/amazon/ec2_group.py +++ b/cloud/amazon/ec2_group.py @@ -127,7 +127,7 @@ def make_rule_key(prefix, rule, group_id, cidr_ip): """Creates a unique key for an individual group rule""" if isinstance(rule, dict): proto, from_port, to_port = [rule.get(x, None) for x in ('proto', 'from_port', 'to_port')] - #fix for 11177 + #fix for 11177 if proto not in ['icmp', 'tcp', 'udp'] and from_port == -1 and to_port == -1: from_port = 'none' to_port = 'none' @@ -145,6 +145,22 @@ def addRulesToLookup(rules, prefix, dict): dict[make_rule_key(prefix, rule, grant.group_id, grant.cidr_ip)] = (rule, grant) +def validate_rule(module, rule): + VALID_PARAMS = ('cidr_ip', + 'group_id', 'group_name', 'group_desc', + 'proto', 'from_port', 'to_port') + for k in rule: + if k not in VALID_PARAMS: + module.fail_json(msg='Invalid rule parameter \'{}\''.format(k)) + + if 'group_id' in rule and 'cidr_ip' in rule: + module.fail_json(msg='Specify group_id OR cidr_ip, not both') + elif 'group_name' in rule and 'cidr_ip' in rule: + module.fail_json(msg='Specify group_name OR cidr_ip, not both') + elif 'group_id' in rule and 'group_name' in rule: + module.fail_json(msg='Specify group_id OR group_name, not both') + + def get_target_from_rule(module, ec2, rule, name, group, groups, vpc_id): """ Returns tuple of (group_id, ip) after validating rule params. @@ -308,6 +324,8 @@ def main(): # Now, go through all provided rules and ensure they are there. if rules is not None: for rule in rules: + validate_rule(module, rule) + group_id, ip, target_group_created = get_target_from_rule(module, ec2, rule, name, group, groups, vpc_id) if target_group_created: changed = True @@ -353,6 +371,8 @@ def main(): # Now, go through all provided rules and ensure they are there. 
if rules_egress is not None: for rule in rules_egress: + validate_rule(module, rule) + group_id, ip, target_group_created = get_target_from_rule(module, ec2, rule, name, group, groups, vpc_id) if target_group_created: changed = True From 17c1fa08728c63826ac791096841539ba30bfd54 Mon Sep 17 00:00:00 2001 From: Robert Estelle Date: Thu, 13 Nov 2014 22:10:23 -0500 Subject: [PATCH 226/464] ec2_eip - PEP8 and minor style cleanups. --- cloud/amazon/ec2_eip.py | 125 +++++++++++++++++++++------------------- 1 file changed, 65 insertions(+), 60 deletions(-) diff --git a/cloud/amazon/ec2_eip.py b/cloud/amazon/ec2_eip.py index 7258ea04759..2dc2c2bb4a2 100644 --- a/cloud/amazon/ec2_eip.py +++ b/cloud/amazon/ec2_eip.py @@ -37,13 +37,15 @@ options: version_added: "1.4" reuse_existing_ip_allowed: description: - - Reuse an EIP that is not associated to an instance (when available), instead of allocating a new one. + - Reuse an EIP that is not associated to an instance (when available),''' +''' instead of allocating a new one. required: false default: false version_added: "1.6" wait_timeout: description: - - how long to wait in seconds for newly provisioned EIPs to become available + - how long to wait in seconds for newly provisioned EIPs to become''' +''' available default: 300 version_added: "1.7" @@ -53,9 +55,9 @@ notes: - This module will return C(public_ip) on success, which will contain the public IP address associated with the instance. - There may be a delay between the time the Elastic IP is assigned and when - the cloud instance is reachable via the new address. Use wait_for and pause - to delay further playbook execution until the instance is reachable, if - necessary. + the cloud instance is reachable via the new address. Use wait_for and + pause to delay further playbook execution until the instance is reachable, + if necessary. 
''' EXAMPLES = ''' @@ -78,7 +80,8 @@ EXAMPLES = ''' ec2_eip: state='present' - name: provision new instances with ec2 - ec2: keypair=mykey instance_type=c1.medium image=ami-40603AD1 wait=yes group=webserver count=3 + ec2: keypair=mykey instance_type=c1.medium image=emi-40603AD1 wait=yes''' +''' group=webserver count=3 register: ec2 - name: associate new elastic IPs with each of the instances ec2_eip: "instance_id={{ item }}" @@ -97,10 +100,12 @@ try: except ImportError: HAS_BOTO = False -wait_timeout = 0 +wait_timeout = 0 + def associate_ip_and_instance(ec2, address, instance_id, module): - if ip_is_associated_with_instance(ec2, address.public_ip, instance_id, module): + if ip_is_associated_with_instance( + ec2, address.public_ip, instance_id, module): module.exit_json(changed=False, public_ip=address.public_ip) # If we're in check mode, nothing else to do @@ -108,21 +113,23 @@ def associate_ip_and_instance(ec2, address, instance_id, module): module.exit_json(changed=True) try: - if address.domain == "vpc": - res = ec2.associate_address(instance_id, allocation_id=address.allocation_id) + if address.domain == 'vpc': + res = ec2.associate_address(instance_id, + allocation_id=address.allocation_id) else: - res = ec2.associate_address(instance_id, public_ip=address.public_ip) + res = ec2.associate_address(instance_id, + public_ip=address.public_ip) except boto.exception.EC2ResponseError, e: module.fail_json(msg=str(e)) - + if res: module.exit_json(changed=True, public_ip=address.public_ip) - else: - module.fail_json(msg="association failed") + module.fail_json(msg='association failed') def disassociate_ip_and_instance(ec2, address, instance_id, module): - if not ip_is_associated_with_instance(ec2, address.public_ip, instance_id, module): + if not ip_is_associated_with_instance( + ec2, address.public_ip, instance_id, module): module.exit_json(changed=False, public_ip=address.public_ip) # If we're in check mode, nothing else to do @@ -131,7 +138,8 @@ def 
disassociate_ip_and_instance(ec2, address, instance_id, module): try: if address.domain == "vpc": - res = ec2.disassociate_address(association_id=address.association_id) + res = ec2.disassociate_address( + association_id=address.association_id) else: res = ec2.disassociate_address(public_ip=address.public_ip) except boto.exception.EC2ResponseError, e: @@ -144,7 +152,7 @@ def disassociate_ip_and_instance(ec2, address, instance_id, module): def find_address(ec2, public_ip, module, fail_on_not_found=True): - """ Find an existing Elastic IP address """ + """ Find an existing Elastic IP address """ if wait_timeout != 0: timeout = time.time() + wait_timeout while timeout > time.time(): @@ -152,20 +160,21 @@ def find_address(ec2, public_ip, module, fail_on_not_found=True): addresses = ec2.get_all_addresses([public_ip]) break except boto.exception.EC2ResponseError, e: - if "Address '%s' not found." % public_ip in e.message : + if "Address '%s' not found." % public_ip in e.message: if not fail_on_not_found: return None else: module.fail_json(msg=str(e.message)) time.sleep(5) - + if timeout <= time.time(): - module.fail_json(msg = "wait for EIPs timeout on %s" % time.asctime()) + module.fail_json(msg="wait for EIPs timeout on %s" % + time.asctime()) else: try: addresses = ec2.get_all_addresses([public_ip]) except boto.exception.EC2ResponseError, e: - if "Address '%s' not found." % public_ip in e.message : + if "Address '%s' not found." 
% public_ip in e.message: if not fail_on_not_found: return None module.fail_json(msg=str(e.message)) @@ -178,8 +187,8 @@ def ip_is_associated_with_instance(ec2, public_ip, instance_id, module): address = find_address(ec2, public_ip, module) if address: return address.instance_id == instance_id - else: - return False + return False + def instance_is_associated(ec2, instance, module): """ @@ -192,6 +201,7 @@ def instance_is_associated(ec2, instance, module): eip = find_address(ec2, instance_ip, module, fail_on_not_found=False) return (eip and (eip.public_ip == instance_ip)) + def allocate_address(ec2, domain, module, reuse_existing_ip_allowed): """ Allocate a new elastic IP address (when needed) and return it """ # If we're in check mode, nothing else to do @@ -199,33 +209,27 @@ def allocate_address(ec2, domain, module, reuse_existing_ip_allowed): module.exit_json(change=True) if reuse_existing_ip_allowed: - if domain: - domain_filter = { 'domain' : domain } - else: - domain_filter = { 'domain' : 'standard' } - all_addresses = ec2.get_all_addresses(filters=domain_filter) - - unassociated_addresses = filter(lambda a: not a.instance_id, all_addresses) - if unassociated_addresses: - address = unassociated_addresses[0]; - else: - address = ec2.allocate_address(domain=domain) - else: - address = ec2.allocate_address(domain=domain) + domain_filter = {'domain': domain or 'standard'} + all_addresses = ec2.get_all_addresses(filters=domain_filter) - return address + unassociated_addresses = [a for a in all_addresses + if not a.instance_id] + if unassociated_addresses: + return unassociated_addresses[0] + + return ec2.allocate_address(domain=domain) def release_address(ec2, public_ip, module): """ Release a previously allocated elastic IP address """ - + address = find_address(ec2, public_ip, module) - + # If we're in check mode, nothing else to do if module.check_mode: module.exit_json(change=True) - - res = address.release() + + res = address.release() if res: 
module.exit_json(changed=True) else: @@ -234,19 +238,19 @@ def release_address(ec2, public_ip, module): def find_instance(ec2, instance_id, module): """ Attempt to find the EC2 instance and return it """ - + try: reservations = ec2.get_all_reservations(instance_ids=[instance_id]) except boto.exception.EC2ResponseError, e: module.fail_json(msg=str(e)) - + if len(reservations) == 1: instances = reservations[0].instances if len(instances) == 1: return instances[0] - + module.fail_json(msg="could not find instance" + instance_id) - + def allocate_eip(ec2, eip_domain, module, reuse_existing_ip_allowed, new_eip_timeout): # Allocate a new elastic IP @@ -260,15 +264,15 @@ def allocate_eip(ec2, eip_domain, module, reuse_existing_ip_allowed, new_eip_tim def main(): argument_spec = ec2_argument_spec() argument_spec.update(dict( - instance_id = dict(required=False), - public_ip = dict(required=False, aliases= ['ip']), - state = dict(required=False, default='present', - choices=['present', 'absent']), - in_vpc = dict(required=False, type='bool', default=False), - reuse_existing_ip_allowed = dict(required=False, type='bool', default=False), - wait_timeout = dict(default=300), - ) - ) + instance_id=dict(required=False), + public_ip=dict(required=False, aliases=['ip']), + state=dict(required=False, default='present', + choices=['present', 'absent']), + in_vpc=dict(required=False, type='bool', default=False), + reuse_existing_ip_allowed=dict(required=False, type='bool', + default=False), + wait_timeout=dict(default=300), + )) module = AnsibleModule( argument_spec=argument_spec, @@ -284,7 +288,7 @@ def main(): public_ip = module.params.get('public_ip') state = module.params.get('state') in_vpc = module.params.get('in_vpc') - domain = "vpc" if in_vpc else None + domain = 'vpc' if in_vpc else None reuse_existing_ip_allowed = module.params.get('reuse_existing_ip_allowed') new_eip_timeout = int(module.params.get('wait_timeout')) @@ -300,6 +304,7 @@ def main(): if public_ip: address = 
find_address(ec2, public_ip, module) + # Allocate an IP for instance since no public_ip was provided if instance_id and not public_ip: instance = find_instance(ec2, instance_id, module) @@ -316,22 +321,22 @@ def main(): address = allocate_eip( ec2, domain, module, reuse_existing_ip_allowed, new_eip_timeout) - # Associate address object (provided or allocated) with instance + # Associate address object (provided or allocated) with instance associate_ip_and_instance(ec2, address, instance_id, module) else: - #disassociating address from instance + # disassociating address from instance if instance_id: address = find_address(ec2, public_ip, module) disassociate_ip_and_instance(ec2, address, instance_id, module) - #releasing address + # releasing address else: release_address(ec2, public_ip, module) # import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * +from ansible.module_utils.basic import * # noqa +from ansible.module_utils.ec2 import * # noqa if __name__ == '__main__': main() From c36e8947bcfae48d07347d3fd32f1aa345958bb7 Mon Sep 17 00:00:00 2001 From: Robert Estelle Date: Thu, 13 Nov 2014 23:05:49 -0500 Subject: [PATCH 227/464] Refactoring of ec2_eip module. 
--- cloud/amazon/ec2_eip.py | 226 ++++++++++++++++------------------------ 1 file changed, 90 insertions(+), 136 deletions(-) diff --git a/cloud/amazon/ec2_eip.py b/cloud/amazon/ec2_eip.py index 2dc2c2bb4a2..1fa551c610d 100644 --- a/cloud/amazon/ec2_eip.py +++ b/cloud/amazon/ec2_eip.py @@ -100,114 +100,72 @@ try: except ImportError: HAS_BOTO = False -wait_timeout = 0 +class EIPException(Exception): + pass -def associate_ip_and_instance(ec2, address, instance_id, module): - if ip_is_associated_with_instance( - ec2, address.public_ip, instance_id, module): - module.exit_json(changed=False, public_ip=address.public_ip) - # If we're in check mode, nothing else to do - if module.check_mode: - module.exit_json(changed=True) +def associate_ip_and_instance(ec2, address, instance_id, check_mode): + if address_is_associated_with_instance(ec2, address, instance_id): + return {'changed': False} - try: + # If we're in check mode, nothing else to do + if not check_mode: if address.domain == 'vpc': res = ec2.associate_address(instance_id, allocation_id=address.allocation_id) else: res = ec2.associate_address(instance_id, public_ip=address.public_ip) - except boto.exception.EC2ResponseError, e: - module.fail_json(msg=str(e)) + if not res: + raise EIPException('association failed') - if res: - module.exit_json(changed=True, public_ip=address.public_ip) - module.fail_json(msg='association failed') + return {'changed': True} -def disassociate_ip_and_instance(ec2, address, instance_id, module): - if not ip_is_associated_with_instance( - ec2, address.public_ip, instance_id, module): - module.exit_json(changed=False, public_ip=address.public_ip) +def disassociate_ip_and_instance(ec2, address, instance_id, check_mode): + if not address_is_associated_with_instance(ec2, address, instance_id): + return {'changed': False} # If we're in check mode, nothing else to do - if module.check_mode: - module.exit_json(changed=True) - - try: - if address.domain == "vpc": + if not check_mode: + if 
address.domain == 'vpc': res = ec2.disassociate_address( association_id=address.association_id) else: res = ec2.disassociate_address(public_ip=address.public_ip) - except boto.exception.EC2ResponseError, e: - module.fail_json(msg=str(e)) - if res: - module.exit_json(changed=True) - else: - module.fail_json(msg="disassociation failed") + if not res: + raise EIPException('disassociation failed') + return {'changed': True} -def find_address(ec2, public_ip, module, fail_on_not_found=True): + +def find_address(ec2, public_ip, wait_timeout): """ Find an existing Elastic IP address """ - if wait_timeout != 0: - timeout = time.time() + wait_timeout - while timeout > time.time(): - try: - addresses = ec2.get_all_addresses([public_ip]) - break - except boto.exception.EC2ResponseError, e: - if "Address '%s' not found." % public_ip in e.message: - if not fail_on_not_found: - return None - else: - module.fail_json(msg=str(e.message)) - time.sleep(5) - - if timeout <= time.time(): - module.fail_json(msg="wait for EIPs timeout on %s" % - time.asctime()) - else: + deadline = time.time() + wait_timeout + while True: try: - addresses = ec2.get_all_addresses([public_ip]) - except boto.exception.EC2ResponseError, e: - if "Address '%s' not found." 
% public_ip in e.message: - if not fail_on_not_found: - return None - module.fail_json(msg=str(e.message)) + return ec2.get_all_addresses([public_ip])[0] + except boto.exception.EC2ResponseError as e: + if "Address '{}' not found.".format(public_ip) not in e.message: + raise - return addresses[0] + if time.time() >= deadline: + raise EIPException('wait for EIPs timeout on {}' + .format(time.asctime())) + time.sleep(5) -def ip_is_associated_with_instance(ec2, public_ip, instance_id, module): +def address_is_associated_with_instance(ec2, address, instance_id): """ Check if the elastic IP is currently associated with the instance """ - address = find_address(ec2, public_ip, module) if address: - return address.instance_id == instance_id + return address and address.instance_id == instance_id return False -def instance_is_associated(ec2, instance, module): - """ - Check if the given instance object is already associated with an - elastic IP - """ - instance_ip = instance.ip_address - if not instance_ip: - return False - eip = find_address(ec2, instance_ip, module, fail_on_not_found=False) - return (eip and (eip.public_ip == instance_ip)) - - -def allocate_address(ec2, domain, module, reuse_existing_ip_allowed): +def allocate_address(ec2, domain, reuse_existing_ip_allowed): """ Allocate a new elastic IP address (when needed) and return it """ - # If we're in check mode, nothing else to do - if module.check_mode: - module.exit_json(change=True) - if reuse_existing_ip_allowed: domain_filter = {'domain': domain or 'standard'} all_addresses = ec2.get_all_addresses(filters=domain_filter) @@ -220,45 +178,67 @@ def allocate_address(ec2, domain, module, reuse_existing_ip_allowed): return ec2.allocate_address(domain=domain) -def release_address(ec2, public_ip, module): +def release_address(ec2, address, check_mode): """ Release a previously allocated elastic IP address """ - address = find_address(ec2, public_ip, module) - # If we're in check mode, nothing else to do - if 
module.check_mode: - module.exit_json(change=True) + if not check_mode: + if not address.release(): + EIPException('release failed') - res = address.release() - if res: - module.exit_json(changed=True) - else: - module.fail_json(msg="release failed") + return {'changed': True} -def find_instance(ec2, instance_id, module): +def find_instance(ec2, instance_id): """ Attempt to find the EC2 instance and return it """ - try: - reservations = ec2.get_all_reservations(instance_ids=[instance_id]) - except boto.exception.EC2ResponseError, e: - module.fail_json(msg=str(e)) + reservations = ec2.get_all_reservations(instance_ids=[instance_id]) if len(reservations) == 1: instances = reservations[0].instances if len(instances) == 1: return instances[0] - module.fail_json(msg="could not find instance" + instance_id) + raise EIPException("could not find instance" + instance_id) + + +def ensure_present(ec2, domain, address, instance_id, + reuse_existing_ip_allowed, check_mode): + changed = False + # Return the EIP object since we've been given a public IP + if not address: + if check_mode: + return {'changed': True} -def allocate_eip(ec2, eip_domain, module, reuse_existing_ip_allowed, new_eip_timeout): - # Allocate a new elastic IP - address = allocate_address(ec2, eip_domain, module, reuse_existing_ip_allowed) - # overriding the timeout since this is a a newly provisioned ip - global wait_timeout - wait_timeout = new_eip_timeout - return address + address = allocate_address(ec2, domain, reuse_existing_ip_allowed) + changed = True + + if instance_id: + # Allocate an IP for instance since no public_ip was provided + instance = find_instance(ec2, instance_id) + if instance.vpc_id: + domain = 'vpc' + + # Associate address object (provided or allocated) with instance + assoc_result = associate_ip_and_instance(ec2, address, instance_id, + check_mode) + changed = changed or assoc_result['changed'] + + return {'changed': changed, 'public_ip': address.public_ip} + + +def ensure_absent(ec2, 
domain, address, check_mode): + if not address: + return {'changed': False} + + # disassociating address from instance + if instance_id: + return disassociate_ip_and_instance(ec2, address, instance_id, + check_mode) + # releasing address + else: + return release_address(ec2, address, check_mode) def main(): @@ -290,48 +270,22 @@ def main(): in_vpc = module.params.get('in_vpc') domain = 'vpc' if in_vpc else None reuse_existing_ip_allowed = module.params.get('reuse_existing_ip_allowed') - new_eip_timeout = int(module.params.get('wait_timeout')) + wait_timeout = int(module.params.get('wait_timeout')) - if state == 'present': - # If both instance_id and public_ip are not specified, allocate a new - # elastic IP, and exit. - if not instance_id and not public_ip: - address = allocate_eip(ec2, domain, module, - reuse_existing_ip_allowed, new_eip_timeout) - module.exit_json(changed=True, public_ip=address.public_ip) - - # Return the EIP object since we've been given a public IP + try: if public_ip: - address = find_address(ec2, public_ip, module) + address = find_address(ec2, public_ip, wait_timeout) - # Allocate an IP for instance since no public_ip was provided - if instance_id and not public_ip: - instance = find_instance(ec2, instance_id, module) - - if instance.vpc_id: - domain = "vpc" - - # Do nothing if the instance is already associated with an - # elastic IP. - if instance_is_associated(ec2, instance, module): - module.exit_json(changed=False, public_ip=instance.ip_address) - - # If the instance is not already associated with an elastic IP, - # allocate a new one. 
- address = allocate_eip( - ec2, domain, module, reuse_existing_ip_allowed, new_eip_timeout) - - # Associate address object (provided or allocated) with instance - associate_ip_and_instance(ec2, address, instance_id, module) - - else: - # disassociating address from instance - if instance_id: - address = find_address(ec2, public_ip, module) - disassociate_ip_and_instance(ec2, address, instance_id, module) - # releasing address + if state == 'present': + result = ensure_present(ec2, domain, address, instance_id, + reuse_existing_ip_allowed, + module.check_mode) else: - release_address(ec2, public_ip, module) + result = ensure_absent(ec2, domain, address, module.check_mode) + except (boto.exception.EC2ResponseError, EIPException) as e: + module.fail_json(msg=str(e)) + + module.exit_json(**result) # import module snippets From 063d3d97c6dcbdc304cf05f735cba02c7200f78a Mon Sep 17 00:00:00 2001 From: Robert Estelle Date: Fri, 14 Nov 2014 00:31:45 -0500 Subject: [PATCH 228/464] Remove unnecessary wait_timeout parameter. 
--- cloud/amazon/ec2_eip.py | 42 ++++++++++++++++++++--------------------- 1 file changed, 20 insertions(+), 22 deletions(-) diff --git a/cloud/amazon/ec2_eip.py b/cloud/amazon/ec2_eip.py index 1fa551c610d..2f358d13622 100644 --- a/cloud/amazon/ec2_eip.py +++ b/cloud/amazon/ec2_eip.py @@ -42,12 +42,6 @@ options: required: false default: false version_added: "1.6" - wait_timeout: - description: - - how long to wait in seconds for newly provisioned EIPs to become''' -''' available - default: 300 - version_added: "1.7" extends_documentation_fragment: aws author: Lorin Hochstein @@ -141,20 +135,26 @@ def disassociate_ip_and_instance(ec2, address, instance_id, check_mode): return {'changed': True} -def find_address(ec2, public_ip, wait_timeout): - """ Find an existing Elastic IP address """ - deadline = time.time() + wait_timeout - while True: - try: - return ec2.get_all_addresses([public_ip])[0] - except boto.exception.EC2ResponseError as e: - if "Address '{}' not found.".format(public_ip) not in e.message: - raise +def _find_address_by_ip(ec2, public_ip): + try: + return ec2.get_all_addresses([public_ip])[0] + except boto.exception.EC2ResponseError as e: + if "Address '{}' not found.".format(public_ip) not in e.message: + raise + - if time.time() >= deadline: - raise EIPException('wait for EIPs timeout on {}' - .format(time.asctime())) - time.sleep(5) +def _find_address_by_instance_id(ec2, instance_id): + addresses = ec2.get_all_addresses(None, {'instance-id': instance_id}) + if addresses: + return addresses[0] + + +def find_address(ec2, public_ip, instance_id): + """ Find an existing Elastic IP address """ + if public_ip: + return _find_address_by_ip(ec2, public_ip) + elif instance_id: + return _find_address_by_instance_id(ec2, instance_id) def address_is_associated_with_instance(ec2, address, instance_id): @@ -270,11 +270,9 @@ def main(): in_vpc = module.params.get('in_vpc') domain = 'vpc' if in_vpc else None reuse_existing_ip_allowed = 
module.params.get('reuse_existing_ip_allowed') - wait_timeout = int(module.params.get('wait_timeout')) try: - if public_ip: - address = find_address(ec2, public_ip, wait_timeout) + address = find_address(ec2, public_ip, instance_id) if state == 'present': result = ensure_present(ec2, domain, address, instance_id, From 942f0ce96ca96a60af80dcb01d5b12215d01f428 Mon Sep 17 00:00:00 2001 From: Robert Estelle Date: Sun, 14 Jun 2015 15:53:56 -0400 Subject: [PATCH 229/464] Fix missing instance_id param in ec2_eip. --- cloud/amazon/ec2_eip.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/amazon/ec2_eip.py b/cloud/amazon/ec2_eip.py index 2f358d13622..b24032b8d4e 100644 --- a/cloud/amazon/ec2_eip.py +++ b/cloud/amazon/ec2_eip.py @@ -228,7 +228,7 @@ def ensure_present(ec2, domain, address, instance_id, return {'changed': changed, 'public_ip': address.public_ip} -def ensure_absent(ec2, domain, address, check_mode): +def ensure_absent(ec2, domain, address, instance_id, check_mode): if not address: return {'changed': False} @@ -279,7 +279,7 @@ def main(): reuse_existing_ip_allowed, module.check_mode) else: - result = ensure_absent(ec2, domain, address, module.check_mode) + result = ensure_absent(ec2, domain, address, instance_id, module.check_mode) except (boto.exception.EC2ResponseError, EIPException) as e: module.fail_json(msg=str(e)) From de617bb3ae459b03a9186b07095565168efbb839 Mon Sep 17 00:00:00 2001 From: Tyler Cross Date: Thu, 14 May 2015 08:47:07 -0400 Subject: [PATCH 230/464] Get specific object version with S3 module. 
- allow specifying version for mode=get and mode=getstr - when version specified doesn't exist give an error message that indicates so --- cloud/amazon/s3.py | 63 ++++++++++++++++++++++++++++++---------------- 1 file changed, 42 insertions(+), 21 deletions(-) diff --git a/cloud/amazon/s3.py b/cloud/amazon/s3.py index 7ad9e6ec32f..0992eac8362 100644 --- a/cloud/amazon/s3.py +++ b/cloud/amazon/s3.py @@ -73,6 +73,13 @@ options: - Keyname of the object inside the bucket. Can be used to create "virtual directories", see examples. required: false default: null + version: + description: + - Version ID of the object inside the bucket. Can be used to get a specific version of a file if versioning is enabled in the target bucket. + required: false + default: null + aliases: [] + version_added: "2.0" overwrite: description: - Force overwrite either locally on the filesystem or remotely with the object/key. Used with PUT and GET operations. @@ -114,6 +121,9 @@ EXAMPLES = ''' # Simple GET operation - s3: bucket=mybucket object=/my/desired/key.txt dest=/usr/local/myfile.txt mode=get +# Get a specific version of an object. +- s3: bucket=mybucket object=/my/desired/key.txt version=48c9ee5131af7a716edc22df9772aa6f dest=/usr/local/myfile.txt mode=get + # PUT/upload with metadata - s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put metadata='Content-Encoding=gzip,Cache-Control=no-cache' @@ -147,20 +157,23 @@ try: except ImportError: HAS_BOTO = False -def key_check(module, s3, bucket, obj): +def key_check(module, s3, bucket, obj, version=None): try: bucket = s3.lookup(bucket) - key_check = bucket.get_key(obj) + key_check = bucket.get_key(obj, version_id=version) except s3.provider.storage_response_error, e: - module.fail_json(msg= str(e)) + if version is not None and e.status == 400: # If a specified version doesn't exist a 400 is returned. 
+ key_check = None + else: + module.fail_json(msg=str(e)) if key_check: return True else: return False -def keysum(module, s3, bucket, obj): +def keysum(module, s3, bucket, obj, version=None): bucket = s3.lookup(bucket) - key_check = bucket.get_key(obj) + key_check = bucket.get_key(obj, version_id=version) if not key_check: return None md5_remote = key_check.etag[1:-1] @@ -246,11 +259,11 @@ def upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt): except s3.provider.storage_copy_error, e: module.fail_json(msg= str(e)) -def download_s3file(module, s3, bucket, obj, dest, retries): +def download_s3file(module, s3, bucket, obj, dest, retries, version=None): # retries is the number of loops; range/xrange needs to be one # more to get that count of loops. bucket = s3.lookup(bucket) - key = bucket.lookup(obj) + key = bucket.get_key(obj, version_id=version) for x in range(0, retries + 1): try: key.get_contents_to_filename(dest) @@ -264,10 +277,10 @@ def download_s3file(module, s3, bucket, obj, dest, retries): # otherwise, try again, this may be a transient timeout. 
pass -def download_s3str(module, s3, bucket, obj): +def download_s3str(module, s3, bucket, obj, version=None): try: bucket = s3.lookup(bucket) - key = bucket.lookup(obj) + key = bucket.get_key(obj, version_id=version) contents = key.get_contents_as_string() module.exit_json(msg="GET operation complete", contents=contents, changed=True) except s3.provider.storage_copy_error, e: @@ -317,6 +330,7 @@ def main(): metadata = dict(type='dict'), mode = dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj'], required=True), object = dict(), + version = dict(default=None), overwrite = dict(aliases=['force'], default='always'), retries = dict(aliases=['retry'], type='int', default=0), s3_url = dict(aliases=['S3_URL']), @@ -336,6 +350,7 @@ def main(): metadata = module.params.get('metadata') mode = module.params.get('mode') obj = module.params.get('object') + version = module.params.get('version') overwrite = module.params.get('overwrite') retries = module.params.get('retries') s3_url = module.params.get('s3_url') @@ -408,29 +423,34 @@ def main(): module.fail_json(msg="Target bucket cannot be found", failed=True) # Next, we check to see if the key in the bucket exists. If it exists, it also returns key_matches md5sum check. - keyrtn = key_check(module, s3, bucket, obj) + keyrtn = key_check(module, s3, bucket, obj, version=version) if keyrtn is False: - module.fail_json(msg="Target key cannot be found", failed=True) + if version is not None: + module.fail_json(msg="Key %s with version id %s does not exist."% (obj, version), failed=True) + else: + module.fail_json(msg="Key %s does not exist."%obj, failed=True) # If the destination path doesn't exist, no need to md5um etag check, so just download. 
pathrtn = path_check(dest) if pathrtn is False: - download_s3file(module, s3, bucket, obj, dest, retries) + download_s3file(module, s3, bucket, obj, dest, retries, version=version) # Compare the remote MD5 sum of the object with the local dest md5sum, if it already exists. if pathrtn is True: - md5_remote = keysum(module, s3, bucket, obj) + md5_remote = keysum(module, s3, bucket, obj, version=version) md5_local = get_md5_digest(dest) + if md5_local == md5_remote: sum_matches = True if overwrite == 'always': - download_s3file(module, s3, bucket, obj, dest, retries) + download_s3file(module, s3, bucket, obj, dest, retries, version=version) else: module.exit_json(msg="Local and remote object are identical, ignoring. Use overwrite=always parameter to force.", changed=False) else: sum_matches = False + if overwrite in ('always', 'different'): - download_s3file(module, s3, bucket, obj, dest, retries) + download_s3file(module, s3, bucket, obj, dest, retries, version=version) else: module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force download.") @@ -440,9 +460,7 @@ def main(): # At this point explicitly define the overwrite condition. if sum_matches is True and pathrtn is True and overwrite == 'always': - download_s3file(module, s3, bucket, obj, dest, retries) - - # If sum does not match but the destination exists, we + download_s3file(module, s3, bucket, obj, dest, retries, version=version) # if our mode is a PUT operation (upload), go through the procedure as appropriate ... 
if mode == 'put': @@ -563,11 +581,14 @@ def main(): if bucketrtn is False: module.fail_json(msg="Bucket %s does not exist."%bucket, failed=True) else: - keyrtn = key_check(module, s3, bucket, obj) + keyrtn = key_check(module, s3, bucket, obj, version=version) if keyrtn is True: - download_s3str(module, s3, bucket, obj) + download_s3str(module, s3, bucket, obj, version=version) else: - module.fail_json(msg="Key %s does not exist."%obj, failed=True) + if version is not None: + module.fail_json(msg="Key %s with version id %s does not exist."% (obj, version), failed=True) + else: + module.fail_json(msg="Key %s does not exist."%obj, failed=True) module.exit_json(failed=False) From ef7a75938a657792ba67d74e686371251b2709ad Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 15 Jun 2015 09:51:15 -0700 Subject: [PATCH 231/464] Further optimizations pointed out by @kustodian in #1516 * Only install yum-utils if needed (b/c we're going to use repoquery) * Add a warning message explaining that why slower repoquery was used rather than yum API. 
--- packaging/os/yum.py | 86 ++++++++++++++++++++++++--------------------- 1 file changed, 45 insertions(+), 41 deletions(-) diff --git a/packaging/os/yum.py b/packaging/os/yum.py index 61fc9f53333..ece8b2407bd 100644 --- a/packaging/os/yum.py +++ b/packaging/os/yum.py @@ -165,12 +165,17 @@ def yum_base(conf_file=None): my.preconf.fn = conf_file return my -def install_yum_utils(module): +def ensure_yum_utils(module): - if not module.check_mode: + repoquerybin = module.get_bin_path('repoquery', required=False) + + if module.params['install_repoquery'] and not repoquerybin and not module.check_mode: yum_path = module.get_bin_path('yum') if yum_path: - rc, so, se = module.run_command('%s -y install yum-utils' % yum_path) + rc, so, se = module.run_command('%s -y install yum-utils' % yum_path) + repoquerybin = module.get_bin_path('repoquery', required=False) + + return repoquerybin def po_to_nevra(po): @@ -313,7 +318,7 @@ def is_update(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=None, dis_r else: module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err)) - return [] + return set() def what_provides(module, repoq, req_spec, conf_file, qf=def_qf, en_repos=None, dis_repos=None): if en_repos is None: @@ -365,7 +370,7 @@ def what_provides(module, repoq, req_spec, conf_file, qf=def_qf, en_repos=None, else: module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err + err2)) - return [] + return set() def transaction_exists(pkglist): """ @@ -626,8 +631,8 @@ def install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): shutil.rmtree(tempdir) except Exception, e: module.fail_json(msg="Failure deleting temp directory %s, %s" % (tempdir, e)) - - module.exit_json(**res) + + return res def remove(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): @@ -681,8 +686,8 @@ def remove(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): if rc != 0: module.fail_json(**res) - - module.exit_json(**res) + + return res def 
latest(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): @@ -764,30 +769,15 @@ def latest(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): else: res['changed'] = True - module.exit_json(**res) + return res def ensure(module, state, pkgs, conf_file, enablerepo, disablerepo, - disable_gpg_check, exclude): + disable_gpg_check, exclude, repoq): yumbin = module.get_bin_path('yum') # need debug level 2 to get 'Nothing to do' for groupinstall. yum_basecmd = [yumbin, '-d', '2', '-y'] - # If rhn-plugin is installed and no rhn-certificate is available on the - # system then users will see an error message using the yum API. Use - # repoquery in those cases. - - my = yum_base(conf_file) - # A sideeffect of accessing conf is that the configuration is - # loaded and plugins are discovered - my.conf - - repoq = None - if 'rhnplugin' in my.plugins._plugins: - repoquery = module.get_bin_path('repoquery', required=False) - if repoquery: - repoq = [repoquery, '--show-duplicates', '--plugins', '--quiet'] - if conf_file and os.path.exists(conf_file): yum_basecmd += ['-c', conf_file] if repoq: @@ -834,16 +824,19 @@ def ensure(module, state, pkgs, conf_file, enablerepo, disablerepo, if state in ['installed', 'present']: if disable_gpg_check: yum_basecmd.append('--nogpgcheck') - install(module, pkgs, repoq, yum_basecmd, conf_file, en_repos, dis_repos) + res = install(module, pkgs, repoq, yum_basecmd, conf_file, en_repos, dis_repos) elif state in ['removed', 'absent']: - remove(module, pkgs, repoq, yum_basecmd, conf_file, en_repos, dis_repos) + res = remove(module, pkgs, repoq, yum_basecmd, conf_file, en_repos, dis_repos) elif state == 'latest': if disable_gpg_check: yum_basecmd.append('--nogpgcheck') - latest(module, pkgs, repoq, yum_basecmd, conf_file, en_repos, dis_repos) + res = latest(module, pkgs, repoq, yum_basecmd, conf_file, en_repos, dis_repos) + else: + # should be caught by AnsibleModule argument_spec + module.fail_json(msg="we should never 
get here unless this all + failed", changed=False, results='', errors='unepected state') - # should be caught by AnsibleModule argument_spec - return dict(changed=False, failed=True, results='', errors='unexpected state') + return res def main(): @@ -878,28 +871,39 @@ def main(): supports_check_mode = True ) - # this should not be needed, but exists as a failsafe - params = module.params - if params['install_repoquery'] and not repoquery and not module.check_mode: - install_yum_utils(module) - - repoquerybin = module.get_bin_path('repoquery', required=False) if params['list']: + repoquerybin = ensure_yum_utils(module) if not repoquerybin: module.fail_json(msg="repoquery is required to use list= with this module. Please install the yum-utils package.") results = dict(results=list_stuff(module, repoquerybin, params['conf_file'], params['list'])) - module.exit_json(**results) else: + # If rhn-plugin is installed and no rhn-certificate is available on + # the system then users will see an error message using the yum API. + # Use repoquery in those cases. 
+ + my = yum_base(conf_file) + # A sideeffect of accessing conf is that the configuration is + # loaded and plugins are discovered + my.conf + repoquery = None + if 'rhnplugin' in my.plugins._plugins: + repoquerybin = ensure_yum_utils(module) + if repoquerybin: + repoquery = [repoquerybin, '--show-duplicates', '--plugins', '--quiet'] + pkg = [ p.strip() for p in params['name']] exclude = params['exclude'] state = params['state'] enablerepo = params.get('enablerepo', '') disablerepo = params.get('disablerepo', '') disable_gpg_check = params['disable_gpg_check'] - res = ensure(module, state, pkg, params['conf_file'], enablerepo, - disablerepo, disable_gpg_check, exclude) - module.fail_json(msg="we should never get here unless this all failed", **res) + results = ensure(module, state, pkg, params['conf_file'], enablerepo, + disablerepo, disable_gpg_check, exclude, repoquery) + if repoquery: + results['msg'] = '%s %s' % (results.get('msg',''), 'Warning: Due to potential bad behaviour with rhnplugin and certificates, used slower repoquery calls instead of Yum API.') + + module.exit_json(**results) # import module snippets from ansible.module_utils.basic import * From 08c17814fb48be494e09909fd866d6bec3d61ff8 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 15 Jun 2015 10:46:59 -0700 Subject: [PATCH 232/464] Fix incorrect line breaking --- packaging/os/yum.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packaging/os/yum.py b/packaging/os/yum.py index ece8b2407bd..6e2b61a189d 100644 --- a/packaging/os/yum.py +++ b/packaging/os/yum.py @@ -833,8 +833,8 @@ def ensure(module, state, pkgs, conf_file, enablerepo, disablerepo, res = latest(module, pkgs, repoq, yum_basecmd, conf_file, en_repos, dis_repos) else: # should be caught by AnsibleModule argument_spec - module.fail_json(msg="we should never get here unless this all - failed", changed=False, results='', errors='unepected state') + module.fail_json(msg="we should never get here unless this 
all" + " failed", changed=False, results='', errors='unepected state') return res From 28a869a030fb3be9818c71e08d3048ff88a2b6a0 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Mon, 15 Jun 2015 14:41:22 -0400 Subject: [PATCH 233/464] Updating cloud modules with proper github author information --- cloud/amazon/cloudformation.py | 2 +- cloud/amazon/ec2.py | 5 ++++- cloud/amazon/ec2_ami.py | 2 +- cloud/amazon/ec2_ami_find.py | 2 +- cloud/amazon/ec2_asg.py | 2 +- cloud/amazon/ec2_eip.py | 2 +- cloud/amazon/ec2_elb.py | 2 +- cloud/amazon/ec2_elb_lb.py | 2 +- cloud/amazon/ec2_facts.py | 2 +- cloud/amazon/ec2_group.py | 1 + cloud/amazon/ec2_key.py | 2 +- cloud/amazon/ec2_lc.py | 2 +- cloud/amazon/ec2_metric_alarm.py | 2 +- cloud/amazon/ec2_scaling_policy.py | 2 +- cloud/amazon/ec2_snapshot.py | 2 +- cloud/amazon/ec2_tag.py | 2 +- cloud/amazon/ec2_vol.py | 2 +- cloud/amazon/ec2_vpc.py | 2 +- cloud/amazon/elasticache.py | 2 +- cloud/amazon/iam.py | 4 +++- cloud/amazon/iam_policy.py | 2 +- cloud/amazon/rds.py | 5 ++++- cloud/amazon/rds_param_group.py | 2 +- cloud/amazon/rds_subnet_group.py | 2 +- cloud/amazon/route53.py | 2 +- cloud/amazon/s3.py | 4 +++- cloud/azure/azure.py | 2 +- cloud/digital_ocean/digital_ocean.py | 1 + cloud/digital_ocean/digital_ocean_domain.py | 1 + cloud/digital_ocean/digital_ocean_sshkey.py | 1 + cloud/docker/docker.py | 6 +++++- cloud/docker/docker_image.py | 2 +- cloud/google/gc_storage.py | 2 +- cloud/google/gce.py | 2 +- cloud/google/gce_lb.py | 2 +- cloud/google/gce_net.py | 2 +- cloud/google/gce_pd.py | 2 +- cloud/linode/linode.py | 2 +- cloud/openstack/keystone_user.py | 2 +- cloud/openstack/nova_keypair.py | 3 +++ cloud/openstack/os_auth.py | 1 + cloud/openstack/os_client_config.py | 2 +- cloud/openstack/os_image.py | 1 + cloud/openstack/os_network.py | 1 + cloud/openstack/os_server.py | 1 + cloud/openstack/os_server_actions.py | 1 + cloud/openstack/os_server_facts.py | 1 + cloud/openstack/os_server_volume.py | 1 + 
cloud/openstack/os_subnet.py | 1 + cloud/openstack/os_volume.py | 1 + cloud/openstack/quantum_floating_ip.py | 3 +++ cloud/openstack/quantum_floating_ip_associate.py | 1 + cloud/openstack/quantum_router.py | 1 + cloud/openstack/quantum_router_gateway.py | 1 + cloud/openstack/quantum_router_interface.py | 1 + cloud/rackspace/rax.py | 4 +++- cloud/rackspace/rax_cbs.py | 4 +++- cloud/rackspace/rax_cbs_attachments.py | 4 +++- cloud/rackspace/rax_cdb.py | 2 +- cloud/rackspace/rax_cdb_database.py | 2 +- cloud/rackspace/rax_cdb_user.py | 2 +- cloud/rackspace/rax_dns.py | 2 +- cloud/rackspace/rax_dns_record.py | 2 +- cloud/rackspace/rax_facts.py | 2 +- cloud/rackspace/rax_files.py | 2 +- cloud/rackspace/rax_files_objects.py | 2 +- cloud/rackspace/rax_identity.py | 4 +++- cloud/rackspace/rax_keypair.py | 2 +- cloud/rackspace/rax_meta.py | 2 +- cloud/rackspace/rax_network.py | 4 +++- cloud/rackspace/rax_queue.py | 4 +++- cloud/rackspace/rax_scaling_group.py | 2 +- cloud/rackspace/rax_scaling_policy.py | 2 +- cloud/vmware/vsphere_guest.py | 2 +- 74 files changed, 104 insertions(+), 55 deletions(-) diff --git a/cloud/amazon/cloudformation.py b/cloud/amazon/cloudformation.py index b774cbfefbb..dee292aeba3 100644 --- a/cloud/amazon/cloudformation.py +++ b/cloud/amazon/cloudformation.py @@ -94,7 +94,7 @@ options: required: false version_added: "2.0" -author: James S. Martin +author: "James S. 
Martin (@jsmartin)" extends_documentation_fragment: aws ''' diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index 019e4902fce..fc1e8125b65 100644 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -226,7 +226,10 @@ options: default: null aliases: [] -author: Seth Vidal, Tim Gerla, Lester Wade +author: + - "Tim Gerla (@tgerla)" + - "Lester Wade (@lwade)" + - "Seth Vidal" extends_documentation_fragment: aws ''' diff --git a/cloud/amazon/ec2_ami.py b/cloud/amazon/ec2_ami.py index b9bb1bff72d..0d504ee3b0c 100644 --- a/cloud/amazon/ec2_ami.py +++ b/cloud/amazon/ec2_ami.py @@ -87,7 +87,7 @@ options: default: null version_added: "2.0" -author: Evan Duffield +author: "Evan Duffield (@scicoin-project) " extends_documentation_fragment: aws ''' diff --git a/cloud/amazon/ec2_ami_find.py b/cloud/amazon/ec2_ami_find.py index 2c83e0d3204..c8aa5d792df 100644 --- a/cloud/amazon/ec2_ami_find.py +++ b/cloud/amazon/ec2_ami_find.py @@ -25,7 +25,7 @@ description: - Can search AMIs with different owners - Can search by matching tag(s), by AMI name and/or other criteria - Results can be sorted and sliced -author: Tom Bamford +author: "Tom Bamford (@tombamford)" notes: - This module is not backwards compatible with the previous version of the ec2_search_ami module which worked only for Ubuntu AMIs listed on cloud-images.ubuntu.com. - See the example below for a suggestion of how to search by distro/release. 
diff --git a/cloud/amazon/ec2_asg.py b/cloud/amazon/ec2_asg.py index 6a38860a7e1..54d051375e6 100644 --- a/cloud/amazon/ec2_asg.py +++ b/cloud/amazon/ec2_asg.py @@ -21,7 +21,7 @@ description: - Can create or delete AWS Autoscaling Groups - Works with the ec2_lc module to manage Launch Configurations version_added: "1.6" -author: Gareth Rushgrove +author: "Gareth Rushgrove (@garethr)" options: state: description: diff --git a/cloud/amazon/ec2_eip.py b/cloud/amazon/ec2_eip.py index b24032b8d4e..c3b764b2e63 100644 --- a/cloud/amazon/ec2_eip.py +++ b/cloud/amazon/ec2_eip.py @@ -44,7 +44,7 @@ options: version_added: "1.6" extends_documentation_fragment: aws -author: Lorin Hochstein +author: "Lorin Hochstein (@lorin) " notes: - This module will return C(public_ip) on success, which will contain the public IP address associated with the instance. diff --git a/cloud/amazon/ec2_elb.py b/cloud/amazon/ec2_elb.py index f5f5a487dfa..6530a00bcb9 100644 --- a/cloud/amazon/ec2_elb.py +++ b/cloud/amazon/ec2_elb.py @@ -25,7 +25,7 @@ description: if state=absent is passed as an argument. - Will be marked changed when called only if there are ELBs found to operate on. version_added: "1.2" -author: John Jarvis +author: "John Jarvis (@jarv)" options: state: description: diff --git a/cloud/amazon/ec2_elb_lb.py b/cloud/amazon/ec2_elb_lb.py index 4a03542de66..566db2d329a 100644 --- a/cloud/amazon/ec2_elb_lb.py +++ b/cloud/amazon/ec2_elb_lb.py @@ -22,7 +22,7 @@ description: - Will be marked changed when called only if state is changed. short_description: Creates or destroys Amazon ELB. version_added: "1.5" -author: Jim Dalton +author: "Jim Dalton (@jsdalton)" options: state: description: diff --git a/cloud/amazon/ec2_facts.py b/cloud/amazon/ec2_facts.py index cf2a90aabc5..6bd587bf018 100644 --- a/cloud/amazon/ec2_facts.py +++ b/cloud/amazon/ec2_facts.py @@ -36,7 +36,7 @@ description: The module must be called from within the EC2 instance itself. 
notes: - Parameters to filter on ec2_facts may be added later. -author: "Silviu Dicu " +author: "Silviu Dicu (@silviud) " ''' EXAMPLES = ''' diff --git a/cloud/amazon/ec2_group.py b/cloud/amazon/ec2_group.py index 54e92e9784c..bde2f5cc19e 100644 --- a/cloud/amazon/ec2_group.py +++ b/cloud/amazon/ec2_group.py @@ -5,6 +5,7 @@ DOCUMENTATION = ''' --- module: ec2_group +author: "Andrew de Quincey (@adq)" version_added: "1.3" short_description: maintain an ec2 VPC security group. description: diff --git a/cloud/amazon/ec2_key.py b/cloud/amazon/ec2_key.py index 6bc9d936ee3..a9217bd69db 100644 --- a/cloud/amazon/ec2_key.py +++ b/cloud/amazon/ec2_key.py @@ -46,7 +46,7 @@ options: version_added: "1.6" extends_documentation_fragment: aws -author: Vincent Viallet +author: "Vincent Viallet (@zbal)" ''' EXAMPLES = ''' diff --git a/cloud/amazon/ec2_lc.py b/cloud/amazon/ec2_lc.py index 5259479ab5f..3c292377a58 100644 --- a/cloud/amazon/ec2_lc.py +++ b/cloud/amazon/ec2_lc.py @@ -26,7 +26,7 @@ notes: after it is changed will not modify the launch configuration on AWS. You must create a new config and assign it to the ASG instead." 
version_added: "1.6" -author: Gareth Rushgrove +author: "Gareth Rushgrove (@garethr)" options: state: description: diff --git a/cloud/amazon/ec2_metric_alarm.py b/cloud/amazon/ec2_metric_alarm.py index aecd80ecdad..578a1af7297 100644 --- a/cloud/amazon/ec2_metric_alarm.py +++ b/cloud/amazon/ec2_metric_alarm.py @@ -21,7 +21,7 @@ description: - Can create or delete AWS metric alarms - Metrics you wish to alarm on must already exist version_added: "1.6" -author: Zacharie Eakin +author: "Zacharie Eakin (@zeekin)" options: state: description: diff --git a/cloud/amazon/ec2_scaling_policy.py b/cloud/amazon/ec2_scaling_policy.py index 707f462ec90..10f03e9fc46 100644 --- a/cloud/amazon/ec2_scaling_policy.py +++ b/cloud/amazon/ec2_scaling_policy.py @@ -7,7 +7,7 @@ description: - Can create or delete scaling policies for autoscaling groups - Referenced autoscaling groups must already exist version_added: "1.6" -author: Zacharie Eakin +author: "Zacharie Eakin (@zeekin)" options: state: description: diff --git a/cloud/amazon/ec2_snapshot.py b/cloud/amazon/ec2_snapshot.py index ae3df140cce..ee9d5ab1110 100644 --- a/cloud/amazon/ec2_snapshot.py +++ b/cloud/amazon/ec2_snapshot.py @@ -75,7 +75,7 @@ options: required: false version_added: "1.9" -author: Will Thames +author: "Will Thames (@willthames)" extends_documentation_fragment: aws ''' diff --git a/cloud/amazon/ec2_tag.py b/cloud/amazon/ec2_tag.py index f82e8ca782d..f79aea4cf25 100644 --- a/cloud/amazon/ec2_tag.py +++ b/cloud/amazon/ec2_tag.py @@ -42,7 +42,7 @@ options: default: null aliases: ['aws_region', 'ec2_region'] -author: Lester Wade +author: "Lester Wade (@lwade)" extends_documentation_fragment: aws ''' diff --git a/cloud/amazon/ec2_vol.py b/cloud/amazon/ec2_vol.py index 3065b550457..921838284f4 100644 --- a/cloud/amazon/ec2_vol.py +++ b/cloud/amazon/ec2_vol.py @@ -107,7 +107,7 @@ options: default: present choices: ['absent', 'present', 'list'] version_added: "1.6" -author: Lester Wade +author: "Lester Wade (@lwade)" 
extends_documentation_fragment: aws ''' diff --git a/cloud/amazon/ec2_vpc.py b/cloud/amazon/ec2_vpc.py index 5f63e83eb66..611251e307f 100644 --- a/cloud/amazon/ec2_vpc.py +++ b/cloud/amazon/ec2_vpc.py @@ -100,7 +100,7 @@ options: required: true default: null aliases: ['aws_region', 'ec2_region'] -author: Carson Gee +author: "Carson Gee (@carsongee)" extends_documentation_fragment: aws ''' diff --git a/cloud/amazon/elasticache.py b/cloud/amazon/elasticache.py index bc51fd6d264..3ec0fc2e351 100644 --- a/cloud/amazon/elasticache.py +++ b/cloud/amazon/elasticache.py @@ -22,7 +22,7 @@ description: - Manage cache clusters in Amazon Elasticache. - Returns information about the specified cache cluster. version_added: "1.4" -author: Jim Dalton +author: "Jim Dalton (@jsdalton)" options: state: description: diff --git a/cloud/amazon/iam.py b/cloud/amazon/iam.py index d496a7a40c7..a7d0fbeee5b 100644 --- a/cloud/amazon/iam.py +++ b/cloud/amazon/iam.py @@ -97,7 +97,9 @@ options: aliases: [ 'ec2_access_key', 'access_key' ] notes: - 'Currently boto does not support the removal of Managed Policies, the module will error out if your user/group/role has managed policies when you try to do state=absent. They will need to be removed manually.' -author: Jonathan I. Davila and Paul Seiffert (@defionscode) +author: + - "Jonathan I. Davila (@defionscode)" + - "Paul Seiffert (@seiffert)" extends_documentation_fragment: aws ''' diff --git a/cloud/amazon/iam_policy.py b/cloud/amazon/iam_policy.py index 284c765c104..f1a6abdd0a6 100644 --- a/cloud/amazon/iam_policy.py +++ b/cloud/amazon/iam_policy.py @@ -72,7 +72,7 @@ options: requirements: [ "boto" ] notes: - 'Currently boto does not support the removal of Managed Policies, the module will not work removing/adding managed policies.' -author: Jonathan I. Davila +author: "Jonathan I. 
Davila (@defionscode)" extends_documentation_fragment: aws ''' diff --git a/cloud/amazon/rds.py b/cloud/amazon/rds.py index 5b152ace6b2..7dc1955c558 100644 --- a/cloud/amazon/rds.py +++ b/cloud/amazon/rds.py @@ -244,7 +244,10 @@ options: requirements: - "python >= 2.6" - "boto" -author: Bruce Pennypacker, Will Thames +author: + - "Bruce Pennypacker (@bpennypacker)" + - "Will Thames (@willthames)" + ''' # FIXME: the command stuff needs a 'state' like alias to make things consistent -- MPD diff --git a/cloud/amazon/rds_param_group.py b/cloud/amazon/rds_param_group.py index cba482c72d6..7b875304810 100644 --- a/cloud/amazon/rds_param_group.py +++ b/cloud/amazon/rds_param_group.py @@ -67,7 +67,7 @@ options: required: true default: null aliases: ['aws_region', 'ec2_region'] -author: Scott Anderson +author: "Scott Anderson (@tastychutney)" extends_documentation_fragment: aws ''' diff --git a/cloud/amazon/rds_subnet_group.py b/cloud/amazon/rds_subnet_group.py index eb9c3693213..3b998c34225 100644 --- a/cloud/amazon/rds_subnet_group.py +++ b/cloud/amazon/rds_subnet_group.py @@ -53,7 +53,7 @@ options: required: true default: null aliases: ['aws_region', 'ec2_region'] -author: Scott Anderson +author: "Scott Anderson (@tastychutney)" extends_documentation_fragment: aws ''' diff --git a/cloud/amazon/route53.py b/cloud/amazon/route53.py index d6c758b3974..67700060d9f 100644 --- a/cloud/amazon/route53.py +++ b/cloud/amazon/route53.py @@ -93,7 +93,7 @@ options: required: false default: false version_added: "1.9" -author: Bruce Pennypacker +author: "Bruce Pennypacker (@bpennypacker)" extends_documentation_fragment: aws ''' diff --git a/cloud/amazon/s3.py b/cloud/amazon/s3.py index 0992eac8362..9bec312294a 100644 --- a/cloud/amazon/s3.py +++ b/cloud/amazon/s3.py @@ -110,7 +110,9 @@ options: version_added: "1.3" requirements: [ "boto" ] -author: Lester Wade, Ralph Tice +author: + - "Lester Wade (@lwade)" + - "Ralph Tice (@ralph-tice)" extends_documentation_fragment: aws ''' diff 
--git a/cloud/azure/azure.py b/cloud/azure/azure.py index 3303fc01867..98984dfb9e6 100644 --- a/cloud/azure/azure.py +++ b/cloud/azure/azure.py @@ -114,7 +114,7 @@ options: requirements: - "python >= 2.6" - "azure >= 0.7.1" -author: John Whitbeck +author: "John Whitbeck (@jwhitbeck)" ''' EXAMPLES = ''' diff --git a/cloud/digital_ocean/digital_ocean.py b/cloud/digital_ocean/digital_ocean.py index f4475a104a7..d7b55bee693 100644 --- a/cloud/digital_ocean/digital_ocean.py +++ b/cloud/digital_ocean/digital_ocean.py @@ -22,6 +22,7 @@ short_description: Create/delete a droplet/SSH_key in DigitalOcean description: - Create/delete a droplet in DigitalOcean and optionally wait for it to be 'running', or deploy an SSH key. version_added: "1.3" +author: "Vincent Viallet (@zbal)" options: command: description: diff --git a/cloud/digital_ocean/digital_ocean_domain.py b/cloud/digital_ocean/digital_ocean_domain.py index bf6bf8679b0..905b6dae2d0 100644 --- a/cloud/digital_ocean/digital_ocean_domain.py +++ b/cloud/digital_ocean/digital_ocean_domain.py @@ -22,6 +22,7 @@ short_description: Create/delete a DNS record in DigitalOcean description: - Create/delete a DNS record in DigitalOcean. version_added: "1.6" +author: "Michael Gregson (@mgregson)" options: state: description: diff --git a/cloud/digital_ocean/digital_ocean_sshkey.py b/cloud/digital_ocean/digital_ocean_sshkey.py index 1304c756422..a509276bc48 100644 --- a/cloud/digital_ocean/digital_ocean_sshkey.py +++ b/cloud/digital_ocean/digital_ocean_sshkey.py @@ -22,6 +22,7 @@ short_description: Create/delete an SSH key in DigitalOcean description: - Create/delete an SSH key. 
version_added: "1.6" +author: "Michael Gregson (@mgregson)" options: state: description: diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index cb6d3dae075..44ed3ecf038 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -256,7 +256,11 @@ options: default: false version_added: "1.9" -author: Cove Schneider, Joshua Conner, Pavel Antonov, Ash Wilson +author: + - "Cove Schneider (@cove)" + - "Joshua Conner (@joshuaconner)" + - "Pavel Antonov (@softzilla)" + - "Ash Wilson (@smashwilson)" requirements: - "python >= 2.6" - "docker-py >= 0.3.0" diff --git a/cloud/docker/docker_image.py b/cloud/docker/docker_image.py index 10f63a987c5..09fc61e6b08 100644 --- a/cloud/docker/docker_image.py +++ b/cloud/docker/docker_image.py @@ -23,7 +23,7 @@ DOCUMENTATION = ''' --- module: docker_image -author: Pavel Antonov +author: "Pavel Antonov (@softzilla)" version_added: "1.5" short_description: manage docker images description: diff --git a/cloud/google/gc_storage.py b/cloud/google/gc_storage.py index 5e0c5e982e8..280bc42a219 100644 --- a/cloud/google/gc_storage.py +++ b/cloud/google/gc_storage.py @@ -84,7 +84,7 @@ requirements: - "python >= 2.6" - "boto >= 2.9" -author: benno@ansible.com Note. Most of the code has been taken from the S3 module. +author: "Benno Joy (@bennojoy)" ''' diff --git a/cloud/google/gce.py b/cloud/google/gce.py index 314f1200161..862f4a8b215 100644 --- a/cloud/google/gce.py +++ b/cloud/google/gce.py @@ -142,7 +142,7 @@ requirements: - "apache-libcloud >= 0.13.3" notes: - Either I(name) or I(instance_names) is required. 
-author: Eric Johnson +author: "Eric Johnson (@erjohnso) " ''' EXAMPLES = ''' diff --git a/cloud/google/gce_lb.py b/cloud/google/gce_lb.py index df6f9d3d65f..6a264839e50 100644 --- a/cloud/google/gce_lb.py +++ b/cloud/google/gce_lb.py @@ -134,7 +134,7 @@ options: requirements: - "python >= 2.6" - "apache-libcloud >= 0.13.3" -author: Eric Johnson +author: "Eric Johnson (@erjohnso) " ''' EXAMPLES = ''' diff --git a/cloud/google/gce_net.py b/cloud/google/gce_net.py index 079891c5e10..93844901117 100644 --- a/cloud/google/gce_net.py +++ b/cloud/google/gce_net.py @@ -105,7 +105,7 @@ options: requirements: - "python >= 2.6" - "apache-libcloud >= 0.13.3" -author: Eric Johnson +author: "Eric Johnson (@erjohnso) " ''' EXAMPLES = ''' diff --git a/cloud/google/gce_pd.py b/cloud/google/gce_pd.py index 9e2e173c530..2d70c9b335a 100644 --- a/cloud/google/gce_pd.py +++ b/cloud/google/gce_pd.py @@ -120,7 +120,7 @@ options: requirements: - "python >= 2.6" - "apache-libcloud >= 0.13.3" -author: Eric Johnson +author: "Eric Johnson (@erjohnso) " ''' EXAMPLES = ''' diff --git a/cloud/linode/linode.py b/cloud/linode/linode.py index dac22f7f2cb..9ebc770a47c 100644 --- a/cloud/linode/linode.py +++ b/cloud/linode/linode.py @@ -92,7 +92,7 @@ requirements: - "python >= 2.6" - "linode-python" - "pycurl" -author: Vincent Viallet +author: "Vincent Viallet (@zbal)" notes: - LINODE_API_KEY env variable can be used instead ''' diff --git a/cloud/openstack/keystone_user.py b/cloud/openstack/keystone_user.py index 89afe53fbd4..de5eed598c7 100644 --- a/cloud/openstack/keystone_user.py +++ b/cloud/openstack/keystone_user.py @@ -75,7 +75,7 @@ options: requirements: - "python >= 2.6" - python-keystoneclient -author: Lorin Hochstein +author: "Lorin Hochstein (@lorin)" ''' EXAMPLES = ''' diff --git a/cloud/openstack/nova_keypair.py b/cloud/openstack/nova_keypair.py index 1182b0daa4a..b2e38ff7db9 100644 --- a/cloud/openstack/nova_keypair.py +++ b/cloud/openstack/nova_keypair.py @@ -29,6 +29,9 @@ 
DOCUMENTATION = ''' --- module: nova_keypair version_added: "1.2" +author: + - "Benno Joy (@bennojoy)" + - "Michael DeHaan" short_description: Add/Delete key pair from nova description: - Add or Remove key pair from nova . diff --git a/cloud/openstack/os_auth.py b/cloud/openstack/os_auth.py index ec0e8414fd2..a881c217805 100644 --- a/cloud/openstack/os_auth.py +++ b/cloud/openstack/os_auth.py @@ -27,6 +27,7 @@ DOCUMENTATION = ''' module: os_auth short_description: Retrieve an auth token version_added: "2.0" +author: "Monty Taylor (@emonty)" description: - Retrieve an auth token from an OpenStack Cloud requirements: diff --git a/cloud/openstack/os_client_config.py b/cloud/openstack/os_client_config.py index 281bad49621..100608b0fd0 100644 --- a/cloud/openstack/os_client_config.py +++ b/cloud/openstack/os_client_config.py @@ -26,7 +26,7 @@ description: - Get I(openstack) client config data from clouds.yaml or environment version_added: "2.0" requirements: [ os-client-config ] -author: Monty Taylor +author: "Monty Taylor (@emonty)" ''' EXAMPLES = ''' diff --git a/cloud/openstack/os_image.py b/cloud/openstack/os_image.py index ffc99064ad2..115a3f2b4f8 100644 --- a/cloud/openstack/os_image.py +++ b/cloud/openstack/os_image.py @@ -30,6 +30,7 @@ module: os_image short_description: Add/Delete images from OpenStack Cloud extends_documentation_fragment: openstack version_added: "2.0" +author: "Monty Taylor (@emonty)" description: - Add or Remove images from the OpenStack Image Repository options: diff --git a/cloud/openstack/os_network.py b/cloud/openstack/os_network.py index 18e0aaa9a27..9c6174462f7 100644 --- a/cloud/openstack/os_network.py +++ b/cloud/openstack/os_network.py @@ -28,6 +28,7 @@ module: os_network short_description: Creates/Removes networks from OpenStack extends_documentation_fragment: openstack version_added: "2.0" +author: "Monty Taylor (@emonty)" description: - Add or Remove network from OpenStack. 
options: diff --git a/cloud/openstack/os_server.py b/cloud/openstack/os_server.py index a14c738be4a..932cebcc1c3 100644 --- a/cloud/openstack/os_server.py +++ b/cloud/openstack/os_server.py @@ -33,6 +33,7 @@ module: os_server short_description: Create/Delete Compute Instances from OpenStack extends_documentation_fragment: openstack version_added: "2.0" +author: "Monty Taylor (@emonty)" description: - Create or Remove compute instances from OpenStack. options: diff --git a/cloud/openstack/os_server_actions.py b/cloud/openstack/os_server_actions.py index 2b739df4de1..0cfc5bf47cc 100644 --- a/cloud/openstack/os_server_actions.py +++ b/cloud/openstack/os_server_actions.py @@ -31,6 +31,7 @@ module: os_server_actions short_description: Perform actions on Compute Instances from OpenStack extends_documentation_fragment: openstack version_added: "2.0" +author: "Jesse Keating (@j2sol)" description: - Perform server actions on an existing compute instance from OpenStack. This module does not return any data other than changed true/false. diff --git a/cloud/openstack/os_server_facts.py b/cloud/openstack/os_server_facts.py index fee14c7456c..5d61e4c18d3 100644 --- a/cloud/openstack/os_server_facts.py +++ b/cloud/openstack/os_server_facts.py @@ -27,6 +27,7 @@ DOCUMENTATION = ''' module: os_server_facts short_description: Retrieve facts about a compute instance version_added: "2.0" +author: "Monty Taylor (@emonty)" description: - Retrieve facts about a server instance from OpenStack. 
notes: diff --git a/cloud/openstack/os_server_volume.py b/cloud/openstack/os_server_volume.py index 47e1f433853..945a0ce8bf9 100644 --- a/cloud/openstack/os_server_volume.py +++ b/cloud/openstack/os_server_volume.py @@ -31,6 +31,7 @@ module: os_server_volume short_description: Attach/Detach Volumes from OpenStack VM's extends_documentation_fragment: openstack version_added: "2.0" +author: "Monty Taylor (@emonty)" description: - Attach or Detach volumes from OpenStack VM's options: diff --git a/cloud/openstack/os_subnet.py b/cloud/openstack/os_subnet.py index 2fdb4e0dd6d..54672b35ffb 100644 --- a/cloud/openstack/os_subnet.py +++ b/cloud/openstack/os_subnet.py @@ -29,6 +29,7 @@ module: os_subnet short_description: Add/Remove subnet to an OpenStack network extends_documentation_fragment: openstack version_added: "2.0" +author: "Monty Taylor (@emonty)" description: - Add or Remove a subnet to an OpenStack network options: diff --git a/cloud/openstack/os_volume.py b/cloud/openstack/os_volume.py index d5baffb96c6..87b7d9eab54 100644 --- a/cloud/openstack/os_volume.py +++ b/cloud/openstack/os_volume.py @@ -29,6 +29,7 @@ module: os_volume short_description: Create/Delete Cinder Volumes extends_documentation_fragment: openstack version_added: "2.0" +author: "Monty Taylor (@emonty)" description: - Create or Remove cinder block storage volumes options: diff --git a/cloud/openstack/quantum_floating_ip.py b/cloud/openstack/quantum_floating_ip.py index e89f23caa79..b7599da0725 100644 --- a/cloud/openstack/quantum_floating_ip.py +++ b/cloud/openstack/quantum_floating_ip.py @@ -33,6 +33,9 @@ DOCUMENTATION = ''' --- module: quantum_floating_ip version_added: "1.2" +author: + - "Benno Joy (@bennojoy)" + - "Brad P. 
Crochet (@bcrochet)" short_description: Add/Remove floating IP from an instance description: - Add or Remove a floating IP to an instance diff --git a/cloud/openstack/quantum_floating_ip_associate.py b/cloud/openstack/quantum_floating_ip_associate.py index b7e9f71e5fd..a5f39dec133 100644 --- a/cloud/openstack/quantum_floating_ip_associate.py +++ b/cloud/openstack/quantum_floating_ip_associate.py @@ -32,6 +32,7 @@ DOCUMENTATION = ''' --- module: quantum_floating_ip_associate version_added: "1.2" +author: "Benno Joy (@bennojoy)" short_description: Associate or disassociate a particular floating IP with an instance description: - Associates or disassociates a specific floating IP with a particular instance diff --git a/cloud/openstack/quantum_router.py b/cloud/openstack/quantum_router.py index 9588fc0951e..ba94773bbe4 100644 --- a/cloud/openstack/quantum_router.py +++ b/cloud/openstack/quantum_router.py @@ -30,6 +30,7 @@ DOCUMENTATION = ''' --- module: quantum_router version_added: "1.2" +author: "Benno Joy (@bennojoy)" short_description: Create or Remove router from openstack description: - Create or Delete routers from OpenStack diff --git a/cloud/openstack/quantum_router_gateway.py b/cloud/openstack/quantum_router_gateway.py index 6e8047c8e8d..48248662ed7 100644 --- a/cloud/openstack/quantum_router_gateway.py +++ b/cloud/openstack/quantum_router_gateway.py @@ -30,6 +30,7 @@ DOCUMENTATION = ''' --- module: quantum_router_gateway version_added: "1.2" +author: "Benno Joy (@bennojoy)" short_description: set/unset a gateway interface for the router with the specified external network description: - Creates/Removes a gateway interface from the router, used to associate a external network with a router to route external traffic. 
diff --git a/cloud/openstack/quantum_router_interface.py b/cloud/openstack/quantum_router_interface.py index 7d42ec6ff1d..7374b542390 100644 --- a/cloud/openstack/quantum_router_interface.py +++ b/cloud/openstack/quantum_router_interface.py @@ -30,6 +30,7 @@ DOCUMENTATION = ''' --- module: quantum_router_interface version_added: "1.2" +author: "Benno Joy (@bennojoy)" short_description: Attach/Dettach a subnet's interface to a router description: - Attach/Dettach a subnet interface to a router, to provide a gateway for the subnet. diff --git a/cloud/rackspace/rax.py b/cloud/rackspace/rax.py index 874274c22f3..dad2e2a4f8d 100644 --- a/cloud/rackspace/rax.py +++ b/cloud/rackspace/rax.py @@ -182,7 +182,9 @@ options: description: - how long before wait gives up, in seconds default: 300 -author: Jesse Keating, Matt Martz +author: + - "Jesse Keating (@j2sol)" + - "Matt Martz (@sivel)" notes: - I(exact_count) can be "destructive" if the number of running servers in the I(group) is larger than that specified in I(count). In such a case, the diff --git a/cloud/rackspace/rax_cbs.py b/cloud/rackspace/rax_cbs.py index 6f922f0128e..ac4e8de424f 100644 --- a/cloud/rackspace/rax_cbs.py +++ b/cloud/rackspace/rax_cbs.py @@ -79,7 +79,9 @@ options: description: - how long before wait gives up, in seconds default: 300 -author: Christopher H. Laco, Matt Martz +author: + - "Christopher H. Laco (@claco)" + - "Matt Martz (@sivel)" extends_documentation_fragment: rackspace.openstack ''' diff --git a/cloud/rackspace/rax_cbs_attachments.py b/cloud/rackspace/rax_cbs_attachments.py index 870b8e611df..d762b797457 100644 --- a/cloud/rackspace/rax_cbs_attachments.py +++ b/cloud/rackspace/rax_cbs_attachments.py @@ -58,7 +58,9 @@ options: description: - how long before wait gives up, in seconds default: 300 -author: Christopher H. Laco, Matt Martz +author: + - "Christopher H. 
Laco (@claco)" + - "Matt Martz (@sivel)" extends_documentation_fragment: rackspace.openstack ''' diff --git a/cloud/rackspace/rax_cdb.py b/cloud/rackspace/rax_cdb.py index 55e486f79e5..6abadd2ebf4 100644 --- a/cloud/rackspace/rax_cdb.py +++ b/cloud/rackspace/rax_cdb.py @@ -52,7 +52,7 @@ options: description: - how long before wait gives up, in seconds default: 300 -author: Simon JAILLET +author: "Simon JAILLET (@jails)" extends_documentation_fragment: rackspace ''' diff --git a/cloud/rackspace/rax_cdb_database.py b/cloud/rackspace/rax_cdb_database.py index cc7885ee31e..bfd5dbbf128 100644 --- a/cloud/rackspace/rax_cdb_database.py +++ b/cloud/rackspace/rax_cdb_database.py @@ -44,7 +44,7 @@ options: - Indicate desired state of the resource choices: ['present', 'absent'] default: present -author: Simon JAILLET +author: "Simon JAILLET (@jails)" extends_documentation_fragment: rackspace ''' diff --git a/cloud/rackspace/rax_cdb_user.py b/cloud/rackspace/rax_cdb_user.py index a0958084c92..e5169def8df 100644 --- a/cloud/rackspace/rax_cdb_user.py +++ b/cloud/rackspace/rax_cdb_user.py @@ -51,7 +51,7 @@ options: - Indicate desired state of the resource choices: ['present', 'absent'] default: present -author: Simon JAILLET +author: "Simon JAILLET (@jails)" extends_documentation_fragment: rackspace ''' diff --git a/cloud/rackspace/rax_dns.py b/cloud/rackspace/rax_dns.py index dacc4c672fe..b5dbe40b2e9 100644 --- a/cloud/rackspace/rax_dns.py +++ b/cloud/rackspace/rax_dns.py @@ -48,7 +48,7 @@ notes: - "It is recommended that plays utilizing this module be run with C(serial: 1) to avoid exceeding the API request limit imposed by the Rackspace CloudDNS API" -author: Matt Martz +author: "Matt Martz (@sivel)" extends_documentation_fragment: rackspace ''' diff --git a/cloud/rackspace/rax_dns_record.py b/cloud/rackspace/rax_dns_record.py index a28f5b9a9b3..825bb3f21a2 100644 --- a/cloud/rackspace/rax_dns_record.py +++ b/cloud/rackspace/rax_dns_record.py @@ -84,7 +84,7 @@ notes: supplied 
- As of version 1.7, the C(type) field is required and no longer defaults to an C(A) record. - C(PTR) record support was added in version 1.7 -author: Matt Martz +author: "Matt Martz (@sivel)" extends_documentation_fragment: rackspace ''' diff --git a/cloud/rackspace/rax_facts.py b/cloud/rackspace/rax_facts.py index 68ef446f760..c30df5b9462 100644 --- a/cloud/rackspace/rax_facts.py +++ b/cloud/rackspace/rax_facts.py @@ -35,7 +35,7 @@ options: description: - Server name to retrieve facts for default: null -author: Matt Martz +author: "Matt Martz (@sivel)" extends_documentation_fragment: rackspace.openstack ''' diff --git a/cloud/rackspace/rax_files.py b/cloud/rackspace/rax_files.py index 3c54b0a9e2f..c3f9e8ddec4 100644 --- a/cloud/rackspace/rax_files.py +++ b/cloud/rackspace/rax_files.py @@ -76,7 +76,7 @@ options: web_index: description: - Sets an object to be presented as the HTTP index page when accessed by the CDN URL -author: Paul Durivage +author: "Paul Durivage (@angstwad)" extends_documentation_fragment: rackspace ''' diff --git a/cloud/rackspace/rax_files_objects.py b/cloud/rackspace/rax_files_objects.py index f2510477674..0274a79004d 100644 --- a/cloud/rackspace/rax_files_objects.py +++ b/cloud/rackspace/rax_files_objects.py @@ -92,7 +92,7 @@ options: - file - meta default: file -author: Paul Durivage +author: "Paul Durivage (@angstwad)" extends_documentation_fragment: rackspace ''' diff --git a/cloud/rackspace/rax_identity.py b/cloud/rackspace/rax_identity.py index 47b4cb60cf0..a0697742b8e 100644 --- a/cloud/rackspace/rax_identity.py +++ b/cloud/rackspace/rax_identity.py @@ -29,7 +29,9 @@ options: - Indicate desired state of the resource choices: ['present', 'absent'] default: present -author: Christopher H. Laco, Matt Martz +author: + - "Christopher H. 
Laco (@claco)" + - "Matt Martz (@sivel)" extends_documentation_fragment: rackspace.openstack ''' diff --git a/cloud/rackspace/rax_keypair.py b/cloud/rackspace/rax_keypair.py index 8f38abc12e0..e8cadd968fb 100644 --- a/cloud/rackspace/rax_keypair.py +++ b/cloud/rackspace/rax_keypair.py @@ -39,7 +39,7 @@ options: - present - absent default: present -author: Matt Martz +author: "Matt Martz (@sivel)" notes: - Keypairs cannot be manipulated, only created and deleted. To "update" a keypair you must first delete and then recreate. diff --git a/cloud/rackspace/rax_meta.py b/cloud/rackspace/rax_meta.py index 2e1d90f5389..1c6c3bfd234 100644 --- a/cloud/rackspace/rax_meta.py +++ b/cloud/rackspace/rax_meta.py @@ -39,7 +39,7 @@ options: description: - A hash of metadata to associate with the instance default: null -author: Matt Martz +author: "Matt Martz (@sivel)" extends_documentation_fragment: rackspace.openstack ''' diff --git a/cloud/rackspace/rax_network.py b/cloud/rackspace/rax_network.py index bd23f5f878d..cea7531a8db 100644 --- a/cloud/rackspace/rax_network.py +++ b/cloud/rackspace/rax_network.py @@ -39,7 +39,9 @@ options: description: - cidr of the network being created default: null -author: Christopher H. Laco, Jesse Keating +author: + - "Christopher H. Laco (@claco)" + - "Jesse Keating (@j2sol)" extends_documentation_fragment: rackspace.openstack ''' diff --git a/cloud/rackspace/rax_queue.py b/cloud/rackspace/rax_queue.py index d3e5ac3f81e..8b1a60e4b81 100644 --- a/cloud/rackspace/rax_queue.py +++ b/cloud/rackspace/rax_queue.py @@ -35,7 +35,9 @@ options: - present - absent default: present -author: Christopher H. Laco, Matt Martz +author: + - "Christopher H. 
Laco (@claco)" + - "Matt Martz (@sivel)" extends_documentation_fragment: rackspace ''' diff --git a/cloud/rackspace/rax_scaling_group.py b/cloud/rackspace/rax_scaling_group.py index 64783397016..1761c8a3d31 100644 --- a/cloud/rackspace/rax_scaling_group.py +++ b/cloud/rackspace/rax_scaling_group.py @@ -105,7 +105,7 @@ options: - Data to be uploaded to the servers config drive. This option implies I(config_drive). Can be a file path or a string version_added: 1.8 -author: Matt Martz +author: "Matt Martz (@sivel)" extends_documentation_fragment: rackspace ''' diff --git a/cloud/rackspace/rax_scaling_policy.py b/cloud/rackspace/rax_scaling_policy.py index b3da82460d8..b216ca13274 100644 --- a/cloud/rackspace/rax_scaling_policy.py +++ b/cloud/rackspace/rax_scaling_policy.py @@ -73,7 +73,7 @@ options: - present - absent default: present -author: Matt Martz +author: "Matt Martz (@sivel)" extends_documentation_fragment: rackspace ''' diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index 256ec00abab..7c41e6e4d1d 100644 --- a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -119,7 +119,7 @@ options: notes: - This module should run from a system that can access vSphere directly. Either by using local_action, or using delegate_to. 
-author: Richard Hoop +author: "Richard Hoop (@rhoop) " requirements: - "python >= 2.6" - pysphere From 2a5f0bde8796bb366f06a39fab576c362eb048a5 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Mon, 15 Jun 2015 15:53:30 -0400 Subject: [PATCH 234/464] Proper author info for all remaining modules --- commands/command.py | 4 +++- commands/raw.py | 4 +++- commands/script.py | 4 +++- commands/shell.py | 4 +++- database/mysql/mysql_db.py | 2 +- database/mysql/mysql_user.py | 2 +- database/mysql/mysql_variables.py | 1 + database/postgresql/postgresql_db.py | 2 +- database/postgresql/postgresql_privs.py | 2 +- database/postgresql/postgresql_user.py | 2 +- files/acl.py | 2 +- files/assemble.py | 2 +- files/copy.py | 4 +++- files/fetch.py | 4 +++- files/file.py | 4 +++- files/ini_file.py | 2 +- files/lineinfile.py | 4 +++- files/replace.py | 2 +- files/stat.py | 2 +- files/synchronize.py | 2 +- files/template.py | 4 +++- files/unarchive.py | 2 +- files/xattr.py | 2 +- inventory/add_host.py | 4 +++- inventory/group_by.py | 2 +- network/basics/get_url.py | 2 +- network/basics/slurp.py | 4 +++- network/basics/uri.py | 2 +- packaging/language/easy_install.py | 2 +- packaging/language/gem.py | 4 +++- packaging/language/pip.py | 2 +- packaging/os/apt.py | 2 +- packaging/os/apt_key.py | 2 +- packaging/os/apt_repository.py | 2 +- packaging/os/apt_rpm.py | 2 +- packaging/os/redhat_subscription.py | 2 +- packaging/os/rhn_channel.py | 2 +- packaging/os/rpm_key.py | 2 +- packaging/os/yum.py | 4 +++- source_control/git.py | 4 +++- source_control/hg.py | 2 +- source_control/subversion.py | 2 +- system/authorized_key.py | 2 +- system/cron.py | 2 +- system/group.py | 2 +- system/hostname.py | 2 +- system/mount.py | 4 +++- system/ping.py | 4 +++- system/seboolean.py | 2 +- system/selinux.py | 2 +- system/service.py | 4 +++- system/setup.py | 4 +++- system/sysctl.py | 2 +- system/user.py | 2 +- utilities/helper/accelerate.py | 2 +- utilities/helper/fireball.py | 4 +++- 
utilities/logic/assert.py | 4 +++- utilities/logic/async_status.py | 4 +++- utilities/logic/debug.py | 4 +++- utilities/logic/fail.py | 2 +- utilities/logic/include_vars.py | 2 +- utilities/logic/pause.py | 2 +- utilities/logic/set_fact.py | 2 +- utilities/logic/wait_for.py | 5 ++++- web_infrastructure/apache2_module.py | 1 + web_infrastructure/django_manage.py | 2 +- web_infrastructure/htpasswd.py | 2 +- web_infrastructure/supervisorctl.py | 4 +++- windows/win_feature.py | 4 +++- windows/win_get_url.py | 2 +- windows/win_group.py | 2 +- windows/win_msi.py | 2 +- windows/win_ping.py | 2 +- windows/win_service.py | 2 +- windows/win_stat.py | 2 +- windows/win_user.py | 4 +++- 76 files changed, 129 insertions(+), 74 deletions(-) diff --git a/commands/command.py b/commands/command.py index 6baf35922c2..b0aa5a7b99f 100644 --- a/commands/command.py +++ b/commands/command.py @@ -81,7 +81,9 @@ notes: M(command) module is much more secure as it's not affected by the user's environment. - " C(creates), C(removes), and C(chdir) can be specified after the command. For instance, if you only want to run a command if a certain file does not exist, use this." -author: Michael DeHaan +author: + - Ansible Core Team + - Michael DeHaan ''' EXAMPLES = ''' diff --git a/commands/raw.py b/commands/raw.py index 87f2b5c4bdc..5305c978630 100644 --- a/commands/raw.py +++ b/commands/raw.py @@ -34,7 +34,9 @@ notes: playbooks will follow the trend of using M(command) unless M(shell) is explicitly required. When running ad-hoc commands, use your best judgement. -author: Michael DeHaan +author: + - Ansible Core Team + - Michael DeHaan ''' EXAMPLES = ''' diff --git a/commands/script.py b/commands/script.py index 01a1ae34e71..ccf15331a6c 100644 --- a/commands/script.py +++ b/commands/script.py @@ -32,7 +32,9 @@ options: version_added: "1.5" notes: - It is usually preferable to write Ansible modules than pushing scripts. Convert your script to an Ansible module for bonus points! 
-author: Michael DeHaan +author: + - Ansible Core Team + - Michael DeHaan """ EXAMPLES = ''' diff --git a/commands/shell.py b/commands/shell.py index b63a21080ee..cccc90f05ff 100644 --- a/commands/shell.py +++ b/commands/shell.py @@ -57,7 +57,9 @@ notes: "{{ var | quote }}" instead of just "{{ var }}" to make sure they don't include evil things like semicolons. requirements: [ ] -author: Michael DeHaan +author: + - Ansible Core Team + - Michael DeHaan ''' EXAMPLES = ''' diff --git a/database/mysql/mysql_db.py b/database/mysql/mysql_db.py index a76c4526727..e9a530811d4 100644 --- a/database/mysql/mysql_db.py +++ b/database/mysql/mysql_db.py @@ -89,7 +89,7 @@ notes: the credentials from C(~/.my.cnf), and finally fall back to using the MySQL default login of C(root) with no password. requirements: [ ConfigParser ] -author: Mark Theunissen +author: "Mark Theunissen (@marktheunissen)" ''' EXAMPLES = ''' diff --git a/database/mysql/mysql_user.py b/database/mysql/mysql_user.py index c6d34ea0635..244333901c3 100644 --- a/database/mysql/mysql_user.py +++ b/database/mysql/mysql_user.py @@ -120,7 +120,7 @@ notes: the file." requirements: [ "MySQLdb" ] -author: Mark Theunissen +author: "Mark Theunissen (@marktheunissen)" ''' EXAMPLES = """ diff --git a/database/mysql/mysql_variables.py b/database/mysql/mysql_variables.py index 199c5eb6eca..0b0face0328 100644 --- a/database/mysql/mysql_variables.py +++ b/database/mysql/mysql_variables.py @@ -30,6 +30,7 @@ short_description: Manage MySQL global variables description: - Query / Set MySQL variables version_added: 1.3 +author: "Balazs Pocze (@banyek)" options: variable: description: diff --git a/database/postgresql/postgresql_db.py b/database/postgresql/postgresql_db.py index 4ce8e146ccd..469d68fa0fa 100644 --- a/database/postgresql/postgresql_db.py +++ b/database/postgresql/postgresql_db.py @@ -95,7 +95,7 @@ notes: - This module uses I(psycopg2), a Python PostgreSQL database adapter. 
You must ensure that psycopg2 is installed on the host before using this module. If the remote host is the PostgreSQL server (which is the default case), then PostgreSQL must also be installed on the remote host. For Ubuntu-based systems, install the C(postgresql), C(libpq-dev), and C(python-psycopg2) packages on the remote host before using this module. requirements: [ psycopg2 ] -author: Lorin Hochstein +author: "Lorin Hochstein (@lorin)" ''' EXAMPLES = ''' diff --git a/database/postgresql/postgresql_privs.py b/database/postgresql/postgresql_privs.py index 22a565f6b65..10f2361bfb2 100644 --- a/database/postgresql/postgresql_privs.py +++ b/database/postgresql/postgresql_privs.py @@ -136,7 +136,7 @@ notes: another user also, R can still access database objects via these privileges. - When revoking privileges, C(RESTRICT) is assumed (see PostgreSQL docs). requirements: [psycopg2] -author: Bernhard Weitzhofer +author: "Bernhard Weitzhofer (@b6d)" """ EXAMPLES = """ diff --git a/database/postgresql/postgresql_user.py b/database/postgresql/postgresql_user.py index 2998ab273f9..d3f6d81c360 100644 --- a/database/postgresql/postgresql_user.py +++ b/database/postgresql/postgresql_user.py @@ -137,7 +137,7 @@ notes: to all users. You may not specify password or role_attr_flags when the PUBLIC user is specified. requirements: [ psycopg2 ] -author: Lorin Hochstein +author: "Lorin Hochstein (@lorin)" ''' EXAMPLES = ''' diff --git a/files/acl.py b/files/acl.py index 0c568ba59a5..0c924fee94c 100644 --- a/files/acl.py +++ b/files/acl.py @@ -79,7 +79,7 @@ options: description: - DEPRECATED. The acl to set or remove. This must always be quoted in the form of '::'. The qualifier may be empty for some types, but the type and perms are always requried. '-' can be used as placeholder when you do not care about permissions. This is now superseded by entity, type and permissions fields. 
-author: Brian Coca +author: "Brian Coca (@bcoca)" notes: - The "acl" module requires that acls are enabled on the target filesystem and that the setfacl and getfacl binaries are installed. ''' diff --git a/files/assemble.py b/files/assemble.py index a66c82f432a..1f9a952d04a 100644 --- a/files/assemble.py +++ b/files/assemble.py @@ -79,7 +79,7 @@ options: U(http://docs.python.org/2/library/re.html). required: false default: null -author: Stephen Fromm +author: "Stephen Fromm (@sfromm)" extends_documentation_fragment: files ''' diff --git a/files/copy.py b/files/copy.py index e80ed805539..b7f333cead6 100644 --- a/files/copy.py +++ b/files/copy.py @@ -86,7 +86,9 @@ options: required: false version_added: "1.5" extends_documentation_fragment: files -author: Michael DeHaan +author: + - "Ansible Core Team" + - "Michael DeHaan" notes: - The "copy" module recursively copy facility does not scale to lots (>hundreds) of files. For alternative, see synchronize module, which is a wrapper around rsync. diff --git a/files/fetch.py b/files/fetch.py index 37ead3a7352..b8234374976 100644 --- a/files/fetch.py +++ b/files/fetch.py @@ -50,7 +50,9 @@ options: will use the basename of the source file, similar to the copy module. Obviously this is only handy if the filenames are unique. 
requirements: [] -author: Michael DeHaan +author: + - "Ansible Core Team" + - "Michael DeHaan" ''' EXAMPLES = ''' diff --git a/files/file.py b/files/file.py index 329fe1e0263..55d3665028e 100644 --- a/files/file.py +++ b/files/file.py @@ -41,7 +41,9 @@ description: notes: - See also M(copy), M(template), M(assemble) requirements: [ ] -author: Michael DeHaan +author: + - "Ansible Core Team" + - "Michael DeHaan" options: path: description: diff --git a/files/ini_file.py b/files/ini_file.py index e247c265fc8..9242821ae9e 100644 --- a/files/ini_file.py +++ b/files/ini_file.py @@ -73,7 +73,7 @@ notes: Either use M(template) to create a base INI file with a C([default]) section, or use M(lineinfile) to add the missing line. requirements: [ ConfigParser ] -author: Jan-Piet Mens +author: "Jan-Piet Mens (@jpmens)" ''' EXAMPLES = ''' diff --git a/files/lineinfile.py b/files/lineinfile.py index e66bdc01131..fafb8470b50 100644 --- a/files/lineinfile.py +++ b/files/lineinfile.py @@ -27,7 +27,9 @@ import tempfile DOCUMENTATION = """ --- module: lineinfile -author: Daniel Hokka Zakrisson, Ahti Kitsik +author: + - "Daniel Hokka Zakrissoni (@dhozac)" + - "Ahti Kitsik (@ahtik)" extends_documentation_fragment: files short_description: Ensure a particular line is in a file, or replace an existing line using a back-referenced regular expression. diff --git a/files/replace.py b/files/replace.py index a21d84cd8df..fa0142823ea 100644 --- a/files/replace.py +++ b/files/replace.py @@ -25,7 +25,7 @@ import tempfile DOCUMENTATION = """ --- module: replace -author: Evan Kaufman +author: "Evan Kaufman (@EvanK)" extends_documentation_fragment: files short_description: Replace all instances of a particular string in a file using a back-referenced regular expression. 
diff --git a/files/stat.py b/files/stat.py index 798a560369e..5f79874d9fd 100644 --- a/files/stat.py +++ b/files/stat.py @@ -47,7 +47,7 @@ options: default: yes aliases: [] version_added: "1.8" -author: Bruce Pennypacker +author: "Bruce Pennypacker (@bpennypacker)" ''' EXAMPLES = ''' diff --git a/files/synchronize.py b/files/synchronize.py index 2fb93e4cad3..7f934e4e6f4 100644 --- a/files/synchronize.py +++ b/files/synchronize.py @@ -163,7 +163,7 @@ notes: C(.rsync-filter) files to the source directory. -author: Timothy Appnel +author: "Timothy Appnel (@tima)" ''' EXAMPLES = ''' diff --git a/files/template.py b/files/template.py index 7ba072fcdc1..2feb599abdf 100644 --- a/files/template.py +++ b/files/template.py @@ -50,7 +50,9 @@ options: notes: - "Since Ansible version 0.9, templates are loaded with C(trim_blocks=True)." requirements: [] -author: Michael DeHaan +author: + - Ansible Core Team + - Michael DeHaan extends_documentation_fragment: files ''' diff --git a/files/unarchive.py b/files/unarchive.py index 625989ffdfb..386503cadd3 100644 --- a/files/unarchive.py +++ b/files/unarchive.py @@ -58,7 +58,7 @@ options: choices: [ "yes", "no" ] default: "no" version_added: "2.0" -author: Dylan Martin +author: "Dylan Martin (@pileofrogs)" todo: - detect changed/unchanged for .zip files - handle common unarchive args, like preserve owner/timestamp etc... diff --git a/files/xattr.py b/files/xattr.py index 94115ae3b51..5e67e5e03a6 100644 --- a/files/xattr.py +++ b/files/xattr.py @@ -58,7 +58,7 @@ options: - if yes, dereferences symlinks and sets/gets attributes on symlink target, otherwise acts on symlink itself. 
-author: Brian Coca +author: "Brian Coca (@bcoca)" ''' EXAMPLES = ''' diff --git a/inventory/add_host.py b/inventory/add_host.py index b28c6f90182..2ab76b4c16a 100644 --- a/inventory/add_host.py +++ b/inventory/add_host.py @@ -22,7 +22,9 @@ options: notes: - This module bypasses the play host loop and only runs once for all the hosts in the play, if you need it to iterate use a with\_ directive. -author: Seth Vidal +author: + - "Ansible Core Team" + - "Seth Vidal" ''' EXAMPLES = ''' diff --git a/inventory/group_by.py b/inventory/group_by.py index d09552e662c..f63bdf5912b 100644 --- a/inventory/group_by.py +++ b/inventory/group_by.py @@ -12,7 +12,7 @@ options: description: - The variables whose values will be used as groups required: true -author: Jeroen Hoekx +author: "Jeroen Hoekx (@jhoekx)" notes: - Spaces in group names are converted to dashes '-'. ''' diff --git a/network/basics/get_url.py b/network/basics/get_url.py index 2bf37e3b129..074bf8bb484 100644 --- a/network/basics/get_url.py +++ b/network/basics/get_url.py @@ -114,7 +114,7 @@ options: required: false # informational: requirements for nodes requirements: [ urllib2, urlparse ] -author: Jan-Piet Mens +author: "Jan-Piet Mens (@jpmens)" ''' EXAMPLES=''' diff --git a/network/basics/slurp.py b/network/basics/slurp.py index a2130c354b2..f96434f5fd3 100644 --- a/network/basics/slurp.py +++ b/network/basics/slurp.py @@ -37,7 +37,9 @@ options: notes: - "See also: M(fetch)" requirements: [] -author: Michael DeHaan +author: + - "Ansible Core Team" + - "Michael DeHaan" ''' EXAMPLES = ''' diff --git a/network/basics/uri.py b/network/basics/uri.py index 6138edbf94b..b7fa8282c83 100644 --- a/network/basics/uri.py +++ b/network/basics/uri.py @@ -144,7 +144,7 @@ options: # informational: requirements for nodes requirements: [ urlparse, httplib2 ] -author: Romeo Theriault +author: "Romeo Theriault (@romeotheriault)" ''' EXAMPLES = ''' diff --git a/packaging/language/easy_install.py b/packaging/language/easy_install.py 
index d566e003d81..017f6b818a6 100644 --- a/packaging/language/easy_install.py +++ b/packaging/language/easy_install.py @@ -85,7 +85,7 @@ notes: - Also note that I(virtualenv) must be installed on the remote host if the C(virtualenv) parameter is specified. requirements: [ "virtualenv" ] -author: Matt Wright +author: "Matt Wright (@mattupstate)" ''' EXAMPLES = ''' diff --git a/packaging/language/gem.py b/packaging/language/gem.py index 54d06da7466..1d15763fbb7 100644 --- a/packaging/language/gem.py +++ b/packaging/language/gem.py @@ -78,7 +78,9 @@ options: - Allow adding build flags for gem compilation required: false version_added: "2.0" -author: Johan Wiren +author: + - "Ansible Core Team" + - "Johan Wiren" ''' EXAMPLES = ''' diff --git a/packaging/language/pip.py b/packaging/language/pip.py index a0c70c1a187..b27e136689d 100644 --- a/packaging/language/pip.py +++ b/packaging/language/pip.py @@ -100,7 +100,7 @@ options: notes: - Please note that virtualenv (U(http://www.virtualenv.org/)) must be installed on the remote host if the virtualenv parameter is specified and the virtualenv needs to be initialized. requirements: [ "virtualenv", "pip" ] -author: Matt Wright +author: "Matt Wright (@mattupstate)" ''' EXAMPLES = ''' diff --git a/packaging/os/apt.py b/packaging/os/apt.py index 61e2fc3f039..09129a73fa5 100644 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -94,7 +94,7 @@ options: required: false version_added: "1.6" requirements: [ python-apt, aptitude ] -author: Matthew Williams +author: "Matthew Williams (@mgwilliams)" notes: - Three of the upgrade modes (C(full), C(safe) and its alias C(yes)) require C(aptitude), otherwise C(apt-get) suffices. 
diff --git a/packaging/os/apt_key.py b/packaging/os/apt_key.py index 2967646feff..d41664f121a 100644 --- a/packaging/os/apt_key.py +++ b/packaging/os/apt_key.py @@ -22,7 +22,7 @@ DOCUMENTATION = ''' --- module: apt_key -author: Jayson Vantuyl & others +author: "Jayson Vantuyl & others (@jvantuyl)" version_added: "1.0" short_description: Add or remove an apt key description: diff --git a/packaging/os/apt_repository.py b/packaging/os/apt_repository.py index 5153699c8bf..9d48edec7bb 100644 --- a/packaging/os/apt_repository.py +++ b/packaging/os/apt_repository.py @@ -63,7 +63,7 @@ options: required: false default: 'yes' choices: ['yes', 'no'] -author: Alexander Saltanov +author: "Alexander Saltanov (@sashka)" version_added: "0.7" requirements: [ python-apt ] ''' diff --git a/packaging/os/apt_rpm.py b/packaging/os/apt_rpm.py index a85c528a239..fec220e0512 100644 --- a/packaging/os/apt_rpm.py +++ b/packaging/os/apt_rpm.py @@ -44,7 +44,7 @@ options: required: false default: no choices: [ "yes", "no" ] -author: Evgenii Terechkov +author: "Evgenii Terechkov (@evgkrsk)" notes: [] ''' diff --git a/packaging/os/redhat_subscription.py b/packaging/os/redhat_subscription.py index 4248f3923a9..1cfd8fc25a6 100644 --- a/packaging/os/redhat_subscription.py +++ b/packaging/os/redhat_subscription.py @@ -7,7 +7,7 @@ short_description: Manage Red Hat Network registration and subscriptions using t description: - Manage registration and subscription to the Red Hat Network entitlement platform. version_added: "1.2" -author: James Laska +author: "James Laska (@jlaska)" notes: - In order to register a system, subscription-manager requires either a username and password, or an activationkey. 
requirements: diff --git a/packaging/os/rhn_channel.py b/packaging/os/rhn_channel.py index 42d61f36e66..0071183158e 100644 --- a/packaging/os/rhn_channel.py +++ b/packaging/os/rhn_channel.py @@ -24,7 +24,7 @@ short_description: Adds or removes Red Hat software channels description: - Adds or removes Red Hat software channels version_added: "1.1" -author: Vincent Van der Kussen +author: "Vincent Van der Kussen (@vincentvdk)" notes: - this module fetches the system id from RHN. requirements: diff --git a/packaging/os/rpm_key.py b/packaging/os/rpm_key.py index f132d552506..1b38da3823b 100644 --- a/packaging/os/rpm_key.py +++ b/packaging/os/rpm_key.py @@ -22,7 +22,7 @@ DOCUMENTATION = ''' --- module: rpm_key -author: Hector Acosta +author: "Hector Acosta (@hacosta) " short_description: Adds or removes a gpg key from the rpm db description: - Adds or removes (rpm --import) a gpg key to your rpm database. diff --git a/packaging/os/yum.py b/packaging/os/yum.py index 9490a15b15d..36fc96f6cea 100644 --- a/packaging/os/yum.py +++ b/packaging/os/yum.py @@ -121,7 +121,9 @@ options: notes: [] # informational: requirements for nodes requirements: [ yum ] -author: Seth Vidal +author: + - "Ansible Core Team" + - "Seth Vidal" ''' EXAMPLES = ''' diff --git a/source_control/git.py b/source_control/git.py index d1ed929a68e..369430211f3 100644 --- a/source_control/git.py +++ b/source_control/git.py @@ -21,7 +21,9 @@ DOCUMENTATION = ''' --- module: git -author: Michael DeHaan +author: + - "Ansible Core Team" + - "Michael DeHaan" version_added: "0.0.1" short_description: Deploy software (or files) from git checkouts description: diff --git a/source_control/hg.py b/source_control/hg.py index d83215fabe1..47b23d26fd5 100644 --- a/source_control/hg.py +++ b/source_control/hg.py @@ -32,7 +32,7 @@ short_description: Manages Mercurial (hg) repositories. description: - Manages Mercurial (hg) repositories. Supports SSH, HTTP/S and local address. 
version_added: "1.0" -author: Yeukhon Wong +author: "Yeukhon Wong (@yeukhon)" options: repo: description: diff --git a/source_control/subversion.py b/source_control/subversion.py index 7d49d0a2272..e3ff6dbfba5 100644 --- a/source_control/subversion.py +++ b/source_control/subversion.py @@ -25,7 +25,7 @@ short_description: Deploys a subversion repository. description: - Deploy given repository URL / revision to dest. If dest exists, update to the specified revision, otherwise perform a checkout. version_added: "0.7" -author: Dane Summers, njharman@gmail.com +author: "Dane Summers (@dsummersl) " notes: - Requires I(svn) to be installed on the client. requirements: [] diff --git a/system/authorized_key.py b/system/authorized_key.py index 458b94dff04..bb223acbe4d 100644 --- a/system/authorized_key.py +++ b/system/authorized_key.py @@ -81,7 +81,7 @@ options: version_added: "1.9" description: - "Adds or removes authorized keys for particular user accounts" -author: Brad Olson +author: "Brad Olson (@bradobro)" ''' EXAMPLES = ''' diff --git a/system/cron.py b/system/cron.py index cfb254acee4..b694bab8f20 100644 --- a/system/cron.py +++ b/system/cron.py @@ -118,7 +118,7 @@ options: choices: [ "reboot", "yearly", "annually", "monthly", "weekly", "daily", "hourly" ] requirements: - cron -author: Dane Summers +author: "Dane Summers (@dsummersl)" updates: [ 'Mike Grozak', 'Patrick Callahan' ] """ diff --git a/system/group.py b/system/group.py index 83ea410b0b1..d952cb5c28c 100644 --- a/system/group.py +++ b/system/group.py @@ -21,7 +21,7 @@ DOCUMENTATION = ''' --- module: group -author: Stephen Fromm +author: "Stephen Fromm (@sfromm)" version_added: "0.0.2" short_description: Add or remove groups requirements: [ groupadd, groupdel, groupmod ] diff --git a/system/hostname.py b/system/hostname.py index b90b0441595..882402a5e21 100644 --- a/system/hostname.py +++ b/system/hostname.py @@ -21,7 +21,7 @@ DOCUMENTATION = ''' --- module: hostname -author: Hiroaki Nakamura +author: 
"Hiroaki Nakamura (@hnakamur)" version_added: "1.4" short_description: Manage hostname requirements: [ hostname ] diff --git a/system/mount.py b/system/mount.py index e11d497220b..1564d0999f4 100644 --- a/system/mount.py +++ b/system/mount.py @@ -79,7 +79,9 @@ options: notes: [] requirements: [] -author: Seth Vidal +author: + - Ansible Core Team + - Seth Vidal ''' EXAMPLES = ''' # Mount DVD read-only diff --git a/system/ping.py b/system/ping.py index b098d0054cd..bea7fb22f1d 100644 --- a/system/ping.py +++ b/system/ping.py @@ -29,7 +29,9 @@ description: contact. It does not make sense in playbooks, but it is useful from C(/usr/bin/ansible) options: {} -author: Michael DeHaan +author: + - "Ansible Core Team" + - "Michael DeHaan" ''' EXAMPLES = ''' diff --git a/system/seboolean.py b/system/seboolean.py index 9799e71636a..3a150d05a20 100644 --- a/system/seboolean.py +++ b/system/seboolean.py @@ -45,7 +45,7 @@ options: notes: - Not tested on any debian based system requirements: [ ] -author: Stephen Fromm +author: "Stephen Fromm (@sfromm)" ''' EXAMPLES = ''' diff --git a/system/selinux.py b/system/selinux.py index 7f88a4a47a8..2debb95a475 100644 --- a/system/selinux.py +++ b/system/selinux.py @@ -45,7 +45,7 @@ options: notes: - Not tested on any debian based system requirements: [ libselinux-python ] -author: Derek Carter +author: "Derek Carter (@goozbach) " ''' EXAMPLES = ''' diff --git a/system/service.py b/system/service.py index 3299b614d52..763553db124 100644 --- a/system/service.py +++ b/system/service.py @@ -21,7 +21,9 @@ DOCUMENTATION = ''' --- module: service -author: Michael DeHaan +author: + - "Ansible Core Team" + - "Michael DeHaan" version_added: "0.1" short_description: Manage services. 
description: diff --git a/system/setup.py b/system/setup.py index 486304230bf..2fbe71e260a 100644 --- a/system/setup.py +++ b/system/setup.py @@ -57,7 +57,9 @@ notes: - If the target host is Windows, you will not currently have the ability to use C(fact_path) or C(filter) as this is provided by a simpler implementation of the module. Different facts are returned for Windows hosts. -author: Michael DeHaan +author: + - "Ansible Core Team" + - "Michael DeHaan" ''' EXAMPLES = """ diff --git a/system/sysctl.py b/system/sysctl.py index c5a68685053..e48d5df74c5 100644 --- a/system/sysctl.py +++ b/system/sysctl.py @@ -71,7 +71,7 @@ options: default: False notes: [] requirements: [] -author: David "DaviXX" CHANIAL +author: "David CHANIAL (@davixx) " ''' EXAMPLES = ''' diff --git a/system/user.py b/system/user.py index dd141565bde..7c3fa4c8594 100644 --- a/system/user.py +++ b/system/user.py @@ -21,7 +21,7 @@ DOCUMENTATION = ''' --- module: user -author: Stephen Fromm +author: "Stephen Fromm (@sfromm)" version_added: "0.2" short_description: Manage user accounts requirements: [ useradd, userdel, usermod ] diff --git a/utilities/helper/accelerate.py b/utilities/helper/accelerate.py index 726195d72e4..8ae8ab263be 100644 --- a/utilities/helper/accelerate.py +++ b/utilities/helper/accelerate.py @@ -66,7 +66,7 @@ notes: requirements: - "python >= 2.6" - "python-keyczar" -author: James Cammarata +author: "James Cammarata (@jimi-c)" ''' EXAMPLES = ''' diff --git a/utilities/helper/fireball.py b/utilities/helper/fireball.py index 43760969a89..97b4acc85a0 100644 --- a/utilities/helper/fireball.py +++ b/utilities/helper/fireball.py @@ -45,7 +45,9 @@ options: notes: - See the advanced playbooks chapter for more about using fireball mode. 
requirements: [ "zmq", "keyczar" ] -author: Michael DeHaan +author: + - "Ansible Core Team" + - "Michael DeHaan" ''' EXAMPLES = ''' diff --git a/utilities/logic/assert.py b/utilities/logic/assert.py index f5963d60cd7..e9e359f421a 100644 --- a/utilities/logic/assert.py +++ b/utilities/logic/assert.py @@ -31,7 +31,9 @@ options: - "A string expression of the same form that can be passed to the 'when' statement" - "Alternatively, a list of string expressions" required: true -author: Michael DeHaan +author: + - "Ansible Core Team" + - "Michael DeHaan" ''' EXAMPLES = ''' diff --git a/utilities/logic/async_status.py b/utilities/logic/async_status.py index f991b50064b..8b134c94a7b 100644 --- a/utilities/logic/async_status.py +++ b/utilities/logic/async_status.py @@ -43,7 +43,9 @@ options: notes: - See also U(http://docs.ansible.com/playbooks_async.html) requirements: [] -author: Michael DeHaan +author: + - "Ansible Core Team" + - "Michael DeHaan" ''' import datetime diff --git a/utilities/logic/debug.py b/utilities/logic/debug.py index 2df68ca0830..5142709dbe0 100644 --- a/utilities/logic/debug.py +++ b/utilities/logic/debug.py @@ -38,7 +38,9 @@ options: var: description: - A variable name to debug. Mutually exclusive with the 'msg' option. 
-author: Dag Wieers, Michael DeHaan +author: + - "Dag Wieers (@dagwieers)" + - "Michael DeHaan" ''' EXAMPLES = ''' diff --git a/utilities/logic/fail.py b/utilities/logic/fail.py index 23f5b83668c..75a7c81d1cf 100644 --- a/utilities/logic/fail.py +++ b/utilities/logic/fail.py @@ -34,7 +34,7 @@ options: required: false default: "'Failed as requested from task'" -author: Dag Wieers +author: "Dag Wieers (@dagwieers)" ''' EXAMPLES = ''' diff --git a/utilities/logic/include_vars.py b/utilities/logic/include_vars.py index 4c7c39d9035..a6b2b5b152f 100644 --- a/utilities/logic/include_vars.py +++ b/utilities/logic/include_vars.py @@ -10,7 +10,7 @@ DOCUMENTATION = ''' --- -author: Benno Joy +author: "Benno Joy (@bennojoy)" module: include_vars short_description: Load variables from files, dynamically within a task. description: diff --git a/utilities/logic/pause.py b/utilities/logic/pause.py index 6e8a83afe61..f1d10bf017f 100644 --- a/utilities/logic/pause.py +++ b/utilities/logic/pause.py @@ -25,7 +25,7 @@ options: - Optional text to use for the prompt message. required: false default: null -author: Tim Bielawa +author: "Tim Bielawa (@tbielawa)" ''' EXAMPLES = ''' diff --git a/utilities/logic/set_fact.py b/utilities/logic/set_fact.py index f9124ab0ea5..f05dbf76795 100644 --- a/utilities/logic/set_fact.py +++ b/utilities/logic/set_fact.py @@ -20,7 +20,7 @@ DOCUMENTATION = ''' --- -author: Dag Wieers +author: "Dag Wieers (@dagwieers)" module: set_fact short_description: Set host facts from a task description: diff --git a/utilities/logic/wait_for.py b/utilities/logic/wait_for.py index 4aa5bc78281..95653b56d3e 100644 --- a/utilities/logic/wait_for.py +++ b/utilities/logic/wait_for.py @@ -101,7 +101,10 @@ options: notes: - The ability to use search_regex with a port connection was added in 1.7. 
requirements: [] -author: Jeroen Hoekx, John Jarvis, Andrii Radyk +author: + - "Jeroen Hoekx (@jhoekx)" + - "John Jarvis (@jarv)" + - "Andrii Radyk (@AnderEnder)" ''' EXAMPLES = ''' diff --git a/web_infrastructure/apache2_module.py b/web_infrastructure/apache2_module.py index 817e782aa76..ec9a8985e60 100644 --- a/web_infrastructure/apache2_module.py +++ b/web_infrastructure/apache2_module.py @@ -20,6 +20,7 @@ DOCUMENTATION = ''' --- module: apache2_module version_added: 1.6 +author: "Christian Berendt (@berendt)" short_description: enables/disables a module of the Apache2 webserver description: - Enables or disables a specified module of the Apache2 webserver. diff --git a/web_infrastructure/django_manage.py b/web_infrastructure/django_manage.py index 46ebb2fb8f1..4dce581fa80 100644 --- a/web_infrastructure/django_manage.py +++ b/web_infrastructure/django_manage.py @@ -92,7 +92,7 @@ notes: - To be able to use the migrate command, you must have south installed and added as an app in your settings - To be able to use the collectstatic command, you must have enabled staticfiles in your settings requirements: [ "virtualenv", "django" ] -author: Scott Anderson +author: "Scott Anderson (@tastychutney)" ''' EXAMPLES = """ diff --git a/web_infrastructure/htpasswd.py b/web_infrastructure/htpasswd.py index 4a72ea37fec..fce8b062d1c 100644 --- a/web_infrastructure/htpasswd.py +++ b/web_infrastructure/htpasswd.py @@ -66,7 +66,7 @@ notes: - "On Debian, Ubuntu, or Fedora: install I(python-passlib)." - "On RHEL or CentOS: Enable EPEL, then install I(python-passlib)." 
requires: [ passlib>=1.6 ] -author: Lorin Hochstein +author: "Lorin Hochstein (@lorin)" """ EXAMPLES = """ diff --git a/web_infrastructure/supervisorctl.py b/web_infrastructure/supervisorctl.py index ef86eec26a7..47d341c9e7b 100644 --- a/web_infrastructure/supervisorctl.py +++ b/web_infrastructure/supervisorctl.py @@ -75,7 +75,9 @@ notes: - When C(state) = I(present), the module will call C(supervisorctl reread) then C(supervisorctl add) if the program/group does not exist. - When C(state) = I(restarted), the module will call C(supervisorctl update) then call C(supervisorctl restart). requirements: [ "supervisorctl" ] -author: Matt Wright, Aaron Wang +author: + - "Matt Wright (@mattupstate)" + - "Aaron Wang (@inetfuture) " ''' EXAMPLES = ''' diff --git a/windows/win_feature.py b/windows/win_feature.py index ef344ee3b22..2d7a747cea0 100644 --- a/windows/win_feature.py +++ b/windows/win_feature.py @@ -68,7 +68,9 @@ options: - no default: null aliases: [] -author: Paul Durivage / Trond Hindenes +author: + - "Paul Durivage (@angstwad)" + - "Trond Hindenes (@trondhindenes)" ''' EXAMPLES = ''' diff --git a/windows/win_get_url.py b/windows/win_get_url.py index 10910cf605e..585d3e2aa81 100644 --- a/windows/win_get_url.py +++ b/windows/win_get_url.py @@ -41,7 +41,7 @@ options: required: false default: yes aliases: [] -author: Paul Durivage +author: "Paul Durivage (@angstwad)" ''' EXAMPLES = ''' diff --git a/windows/win_group.py b/windows/win_group.py index 2013b52be53..5e8b0adaaf2 100644 --- a/windows/win_group.py +++ b/windows/win_group.py @@ -50,7 +50,7 @@ options: - absent default: present aliases: [] -author: Chris Hoffman +author: "Chris Hoffman (@chrishoffman)" ''' EXAMPLES = ''' diff --git a/windows/win_msi.py b/windows/win_msi.py index 9eb6f1bafa5..01f09709f57 100644 --- a/windows/win_msi.py +++ b/windows/win_msi.py @@ -45,7 +45,7 @@ options: description: - Path to a file created by installing the MSI to prevent from attempting to reinstall the package on every run 
-author: Matt Martz +author: "Matt Martz (@sivel)" ''' EXAMPLES = ''' diff --git a/windows/win_ping.py b/windows/win_ping.py index de32877d615..ecb5149f8c3 100644 --- a/windows/win_ping.py +++ b/windows/win_ping.py @@ -35,7 +35,7 @@ options: required: false default: 'pong' aliases: [] -author: Chris Church +author: "Chris Church (@cchurch)" ''' EXAMPLES = ''' diff --git a/windows/win_service.py b/windows/win_service.py index c378be120b1..1f0f6326e65 100644 --- a/windows/win_service.py +++ b/windows/win_service.py @@ -55,7 +55,7 @@ options: - restarted default: null aliases: [] -author: Chris Hoffman +author: "Chris Hoffman (@chrishoffman)" ''' EXAMPLES = ''' diff --git a/windows/win_stat.py b/windows/win_stat.py index c98cd55f599..a933384e20b 100644 --- a/windows/win_stat.py +++ b/windows/win_stat.py @@ -38,7 +38,7 @@ options: required: false default: yes aliases: [] -author: Chris Church +author: "Chris Church (@cchurch)" ''' EXAMPLES = ''' diff --git a/windows/win_user.py b/windows/win_user.py index 82bcf0897ec..376ff487fb3 100644 --- a/windows/win_user.py +++ b/windows/win_user.py @@ -127,7 +127,9 @@ options: - query default: present aliases: [] -author: Paul Durivage / Chris Church +author: + - "Paul Durivage (@angstwad)" + - "Chris Church (@cchurch)" ''' EXAMPLES = ''' From 5465fb8d4f6aa7d22914ba6a8e95b4d8ab2e66f0 Mon Sep 17 00:00:00 2001 From: Sergei Antipov Date: Tue, 16 Jun 2015 10:08:06 +0600 Subject: [PATCH 235/464] mysql_user | Hide password --- database/mysql/mysql_user.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/database/mysql/mysql_user.py b/database/mysql/mysql_user.py index 244333901c3..763e0e7ebd5 100644 --- a/database/mysql/mysql_user.py +++ b/database/mysql/mysql_user.py @@ -377,7 +377,7 @@ def main(): login_port=dict(default=3306, type='int'), login_unix_socket=dict(default=None), user=dict(required=True, aliases=['name']), - password=dict(default=None), + password=dict(default=None, no_log=True), 
host=dict(default="localhost"), state=dict(default="present", choices=["absent", "present"]), priv=dict(default=None), From 7c6c5180037c15d23cf838decc708b55c5d7ddfa Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 16 Jun 2015 06:28:46 -0700 Subject: [PATCH 236/464] Fix bugs found by @kustodian --- packaging/os/yum.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/packaging/os/yum.py b/packaging/os/yum.py index 6e2b61a189d..624ae298b18 100644 --- a/packaging/os/yum.py +++ b/packaging/os/yum.py @@ -871,6 +871,8 @@ def main(): supports_check_mode = True ) + params = module.params + if params['list']: repoquerybin = ensure_yum_utils(module) if not repoquerybin: @@ -882,7 +884,7 @@ def main(): # the system then users will see an error message using the yum API. # Use repoquery in those cases. - my = yum_base(conf_file) + my = yum_base(params['conf_file']) # A sideeffect of accessing conf is that the configuration is # loaded and plugins are discovered my.conf From 92fe35ca7db92da2ea266d6907364e0591e0899f Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Tue, 16 Jun 2015 11:26:33 -0400 Subject: [PATCH 237/464] Updated author data for modules --- cloud/rackspace/rax_clb.py | 4 +++- cloud/rackspace/rax_clb_nodes.py | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/cloud/rackspace/rax_clb.py b/cloud/rackspace/rax_clb.py index a3deae6f4a7..af46d82e0b4 100644 --- a/cloud/rackspace/rax_clb.py +++ b/cloud/rackspace/rax_clb.py @@ -103,7 +103,9 @@ options: description: - how long before wait gives up, in seconds default: 300 -author: Christopher H. Laco, Matt Martz +author: + - "Christopher H. 
Laco (@claco)" + - "Matt Martz (@sivel)" extends_documentation_fragment: rackspace ''' diff --git a/cloud/rackspace/rax_clb_nodes.py b/cloud/rackspace/rax_clb_nodes.py index 472fad19b1c..d832f5f26eb 100644 --- a/cloud/rackspace/rax_clb_nodes.py +++ b/cloud/rackspace/rax_clb_nodes.py @@ -85,7 +85,7 @@ options: required: false description: - Weight of node -author: Lukasz Kawczynski +author: "Lukasz Kawczynski (@neuroid)" extends_documentation_fragment: rackspace ''' From 86c30e68a113bb4789b8b71bfd78f591a2793c4b Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 16 Jun 2015 06:57:32 -0700 Subject: [PATCH 238/464] Restore setting cachedir when non-root but don't take a useless cachedir parameter to the function * Revert "Remove unused code" This reverts commit bcfba0c05098696b6e770335870a9c22792fec38. * Re-add the changes to remove cachedir as a parameter --- packaging/os/yum.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/packaging/os/yum.py b/packaging/os/yum.py index 36fc96f6cea..f809f38c2ed 100644 --- a/packaging/os/yum.py +++ b/packaging/os/yum.py @@ -171,6 +171,14 @@ def yum_base(conf_file=None): my.preconf.errorlevel=0 if conf_file and os.path.exists(conf_file): my.preconf.fn = conf_file + if os.geteuid() != 0: + if hasattr(my, 'setCacheDir'): + my.setCacheDir() + else: + cachedir = yum.misc.getCacheDir() + my.repos.setCacheDir(cachedir) + my.conf.cache = 0 + return my def install_yum_utils(module): From fd7bbc9eba11a0954ed183ed09e15c4da87d220b Mon Sep 17 00:00:00 2001 From: nitzmahone Date: Fri, 24 Apr 2015 17:33:00 -0700 Subject: [PATCH 239/464] Added choice validation for state arg fixes traceback on invalid state arg --- cloud/amazon/ec2.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index fc1e8125b65..2b38c16ccea 100644 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -1199,7 +1199,7 @@ def main(): instance_profile_name = dict(), instance_ids = dict(type='list', 
aliases=['instance_id']), source_dest_check = dict(type='bool', default=True), - state = dict(default='present'), + state = dict(default='present', choices=['present', 'absent', 'running', 'stopped']), exact_count = dict(type='int', default=None), count_tag = dict(), volumes = dict(type='list'), From ff0fc73d64cd2467246435097bf25416e4e1cc7e Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Tue, 31 Mar 2015 16:37:07 -0400 Subject: [PATCH 240/464] Add OpenStack Floating IP Module Also deprecate the two old quantum floating ip modules. --- ...floating_ip.py => _quantum_floating_ip.py} | 1 + ...e.py => _quantum_floating_ip_associate.py} | 1 + cloud/openstack/os_floating_ip.py | 245 ++++++++++++++++++ 3 files changed, 247 insertions(+) rename cloud/openstack/{quantum_floating_ip.py => _quantum_floating_ip.py} (99%) rename cloud/openstack/{quantum_floating_ip_associate.py => _quantum_floating_ip_associate.py} (99%) create mode 100644 cloud/openstack/os_floating_ip.py diff --git a/cloud/openstack/quantum_floating_ip.py b/cloud/openstack/_quantum_floating_ip.py similarity index 99% rename from cloud/openstack/quantum_floating_ip.py rename to cloud/openstack/_quantum_floating_ip.py index b7599da0725..5220d307844 100644 --- a/cloud/openstack/quantum_floating_ip.py +++ b/cloud/openstack/_quantum_floating_ip.py @@ -36,6 +36,7 @@ version_added: "1.2" author: - "Benno Joy (@bennojoy)" - "Brad P. Crochet (@bcrochet)" +deprecated: Deprecated in 2.0. 
Use os_floating_ip instead short_description: Add/Remove floating IP from an instance description: - Add or Remove a floating IP to an instance diff --git a/cloud/openstack/quantum_floating_ip_associate.py b/cloud/openstack/_quantum_floating_ip_associate.py similarity index 99% rename from cloud/openstack/quantum_floating_ip_associate.py rename to cloud/openstack/_quantum_floating_ip_associate.py index a5f39dec133..8960e247b0f 100644 --- a/cloud/openstack/quantum_floating_ip_associate.py +++ b/cloud/openstack/_quantum_floating_ip_associate.py @@ -33,6 +33,7 @@ DOCUMENTATION = ''' module: quantum_floating_ip_associate version_added: "1.2" author: "Benno Joy (@bennojoy)" +deprecated: Deprecated in 2.0. Use os_floating_ip instead short_description: Associate or disassociate a particular floating IP with an instance description: - Associates or disassociates a specific floating IP with a particular instance diff --git a/cloud/openstack/os_floating_ip.py b/cloud/openstack/os_floating_ip.py new file mode 100644 index 00000000000..2d939a9bcd7 --- /dev/null +++ b/cloud/openstack/os_floating_ip.py @@ -0,0 +1,245 @@ +#!/usr/bin/python +# coding: utf-8 -*- + +# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. +# Copyright (c) 2013, Benno Joy +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . 
+ + +try: + import shade + HAS_SHADE = True +except ImportError: + HAS_SHADE = False + +DOCUMENTATION = ''' +--- +module: os_floating_ip +version_added: "2.0" +short_description: Add/Remove floating IP from an instance +extends_documentation_fragment: openstack +description: + - Add or Remove a floating IP to an instance +options: + server: + description: + - The name or ID of the instance to which the IP address + should be assigned. + required: true + network_name: + description: + - Name of the network from which IP has to be assigned to VM. + Please make sure the network is an external network. + - Required if ip_address is not given. + required: true + default: None + internal_network_name: + description: + - Name of the network of the port to associate with the floating ip. + Necessary when VM multiple networks. + required: false + default: None + state: + description: + - Should the resource be present or absent. + choices: [present, absent] + required: false + default: present +requirements: ["shade"] +''' + +EXAMPLES = ''' +# Assign a floating ip to the instance from an external network +- os_floating_ip: + cloud: mordred + state: present + server: vm1 + network_name: external_network + internal_network_name: internal_network +''' + + +def _get_server_state(module, cloud): + info = None + server = cloud.get_server(module.params['server']) + if server: + info = server._info + status = info['status'] + if status != 'ACTIVE' and module.params['state'] == 'present': + module.fail_json( + msg="The VM is available but not Active. 
State: %s" % status + ) + return info, server + + +def _get_port_info(neutron, module, instance_id, internal_network_name=None): + subnet_id = None + if internal_network_name: + kwargs = {'name': internal_network_name} + networks = neutron.list_networks(**kwargs) + network_id = networks['networks'][0]['id'] + kwargs = { + 'network_id': network_id, + 'ip_version': 4 + } + subnets = neutron.list_subnets(**kwargs) + subnet_id = subnets['subnets'][0]['id'] + + kwargs = { + 'device_id': instance_id, + } + try: + ports = neutron.list_ports(**kwargs) + except Exception, e: + module.fail_json(msg="Error in listing ports: %s" % e.message) + + if subnet_id: + port = next(port for port in ports['ports'] if port['fixed_ips'][0]['subnet_id'] == subnet_id) + port_id = port['id'] + fixed_ip_address = port['fixed_ips'][0]['ip_address'] + else: + port_id = ports['ports'][0]['id'] + fixed_ip_address = ports['ports'][0]['fixed_ips'][0]['ip_address'] + + if not ports['ports']: + return None, None + return fixed_ip_address, port_id + + +def _get_floating_ip(neutron, module, fixed_ip_address): + kwargs = { + 'fixed_ip_address': fixed_ip_address + } + try: + ips = neutron.list_floatingips(**kwargs) + except Exception, e: + module.fail_json( + msg="Error in fetching the floatingips's %s" % e.message + ) + + if not ips['floatingips']: + return None, None + + return (ips['floatingips'][0]['id'], + ips['floatingips'][0]['floating_ip_address']) + + +def _create_and_associate_floating_ip(neutron, module, port_id, + net_id, fixed_ip): + kwargs = { + 'port_id': port_id, + 'floating_network_id': net_id, + 'fixed_ip_address': fixed_ip + } + + try: + result = neutron.create_floatingip({'floatingip': kwargs}) + except Exception, e: + module.fail_json( + msg="Error in updating the floating ip address: %s" % e.message + ) + + module.exit_json( + changed=True, + result=result, + public_ip=result['floatingip']['floating_ip_address'] + ) + + +def _get_public_net_id(neutron, module): + kwargs = { + 
'name': module.params['network_name'], + } + try: + networks = neutron.list_networks(**kwargs) + except Exception, e: + module.fail_json("Error in listing neutron networks: %s" % e.message) + if not networks['networks']: + return None + return networks['networks'][0]['id'] + + +def _update_floating_ip(neutron, module, port_id, floating_ip_id): + kwargs = { + 'port_id': port_id + } + try: + result = neutron.update_floatingip(floating_ip_id, + {'floatingip': kwargs}) + except Exception, e: + module.fail_json( + msg="Error in updating the floating ip address: %s" % e.message + ) + module.exit_json(changed=True, result=result) + + +def main(): + argument_spec = openstack_full_argument_spec( + server = dict(required=True), + network_name = dict(required=True), + internal_network_name = dict(default=None), + state = dict(default='present', choices=['absent', 'present']), + ) + + module_kwargs = openstack_module_kwargs() + module = AnsibleModule(argument_spec, **module_kwargs) + + if not HAS_SHADE: + module.fail_json(msg='shade is required for this module') + + state = module.params['state'] + internal_network_name = module.params['internal_network_name'] + + try: + cloud = shade.openstack_cloud(**module.params) + neutron = cloud.neutron_client + + server_info, server_obj = _get_server_state(module, cloud) + if not server_info: + module.fail_json(msg="The server provided cannot be found") + + fixed_ip, port_id = _get_port_info( + neutron, module, server_info['id'], internal_network_name) + if not port_id: + module.fail_json(msg="Cannot find a port for this instance," + " maybe fixed ip is not assigned") + + floating_id, floating_ip = _get_floating_ip(neutron, module, fixed_ip) + + if state == 'present': + if floating_ip: + # This server already has a floating IP assigned + module.exit_json(changed=False, public_ip=floating_ip) + + pub_net_id = _get_public_net_id(neutron, module) + if not pub_net_id: + module.fail_json( + msg="Cannot find the public network specified" + ) 
+ _create_and_associate_floating_ip(neutron, module, port_id, + pub_net_id, fixed_ip) + + elif state == 'absent': + if floating_ip: + _update_floating_ip(neutron, module, None, floating_id) + module.exit_json(changed=False) + + except shade.OpenStackCloudException as e: + module.fail_json(msg=e.message) + +# this is magic, see lib/ansible/module_common.py +from ansible.module_utils.basic import * +from ansible.module_utils.openstack import * +main() From 898e3383189809677a529627330109b49343a2be Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 16 Jun 2015 18:56:24 -0400 Subject: [PATCH 241/464] minor doc fixes --- cloud/amazon/rds.py | 4 ++-- cloud/openstack/os_ironic.py | 32 +++++++++++++++------------- cloud/openstack/os_ironic_node.py | 5 +++-- cloud/openstack/os_object.py | 4 ++-- cloud/openstack/os_security_group.py | 3 +-- 5 files changed, 25 insertions(+), 23 deletions(-) diff --git a/cloud/amazon/rds.py b/cloud/amazon/rds.py index 7dc1955c558..71ead8ad10b 100644 --- a/cloud/amazon/rds.py +++ b/cloud/amazon/rds.py @@ -244,9 +244,9 @@ options: requirements: - "python >= 2.6" - "boto" -author: +author: - "Bruce Pennypacker (@bpennypacker)" - - "Will Thames (@willthames") + - "Will Thames (@willthames)" ''' diff --git a/cloud/openstack/os_ironic.py b/cloud/openstack/os_ironic.py index 137effe6073..0ec4366b79f 100644 --- a/cloud/openstack/os_ironic.py +++ b/cloud/openstack/os_ironic.py @@ -28,6 +28,7 @@ DOCUMENTATION = ''' module: os_ironic short_description: Create/Delete Bare Metal Resources from OpenStack extends_documentation_fragment: openstack +author: "Monty Taylor (@emonty)" version_added: "2.0" description: - Create or Remove Ironic nodes from OpenStack. @@ -71,28 +72,30 @@ options: - Information for this server's driver. Will vary based on which driver is in use. Any sub-field which is populated will be validated during creation. + suboptions: power: - - Information necessary to turn this server on / off. 
This often - includes such things as IPMI username, password, and IP address. - required: true + description: + - Information necessary to turn this server on / off. + This often includes such things as IPMI username, password, and IP address. + required: true deploy: - - Information necessary to deploy this server directly, without - using Nova. THIS IS NOT RECOMMENDED. + description: + - Information necessary to deploy this server directly, without using Nova. THIS IS NOT RECOMMENDED. console: - - Information necessary to connect to this server's serial console. - Not all drivers support this. + description: + - Information necessary to connect to this server's serial console. Not all drivers support this. management: - - Information necessary to interact with this server's management - interface. May be shared by power_info in some cases. - required: true + description: + - Information necessary to interact with this server's management interface. May be shared by power_info in some cases. + required: true nics: description: - - A list of network interface cards, eg, " - mac: aa:bb:cc:aa:bb:cc" + - 'A list of network interface cards, eg, " - mac: aa:bb:cc:aa:bb:cc"' required: true properties: description: - - Definition of the physical characteristics of this server, used for - scheduling purposes + - Definition of the physical characteristics of this server, used for scheduling purposes + suboptions: cpu_arch: description: - CPU architecture (x86_64, i686, ...) 
@@ -107,8 +110,7 @@ options: default: 1 disk_size: description: - - size of first storage device in this machine (typically - /dev/sda), in GB + - size of first storage device in this machine (typically /dev/sda), in GB default: 1 skip_update_of_driver_password: description: diff --git a/cloud/openstack/os_ironic_node.py b/cloud/openstack/os_ironic_node.py index a50d6897e5c..f087581ca0a 100644 --- a/cloud/openstack/os_ironic_node.py +++ b/cloud/openstack/os_ironic_node.py @@ -26,7 +26,9 @@ DOCUMENTATION = ''' --- module: os_ironic_node short_description: Activate/Deactivate Bare Metal Resources from OpenStack +author: "Monty Taylor (@emonty)" extends_documentation_fragment: openstack +version_added: "2.0" description: - Deploy to nodes controlled by Ironic. options: @@ -65,6 +67,7 @@ options: - Definition of the instance information which is used to deploy the node. This information is only required when an instance is set to present. + suboptions: image_source: description: - An HTTP(S) URL where the image can be retrieved from. @@ -93,8 +96,6 @@ options: maintenance mode. required: false default: None - -requirements: ["shade"] ''' EXAMPLES = ''' diff --git a/cloud/openstack/os_object.py b/cloud/openstack/os_object.py index ed58bb1e705..a0eaaeef600 100644 --- a/cloud/openstack/os_object.py +++ b/cloud/openstack/os_object.py @@ -27,7 +27,8 @@ DOCUMENTATION = ''' --- module: os_object short_description: Create or Delete objects and containers from OpenStack -version_added: "1.10" +version_added: "2.0" +author: "Monty Taylor (@emonty)" extends_documentation_fragment: openstack description: - Create or Delete objects and containers from OpenStack @@ -56,7 +57,6 @@ options: - Should the resource be present or absent. 
choices: [present, absent] default: present -requirements: ["shade"] ''' EXAMPLES = ''' diff --git a/cloud/openstack/os_security_group.py b/cloud/openstack/os_security_group.py index 4aaff2470d6..a3d1262d6fe 100644 --- a/cloud/openstack/os_security_group.py +++ b/cloud/openstack/os_security_group.py @@ -28,6 +28,7 @@ DOCUMENTATION = ''' module: os_security_group short_description: Add/Delete security groups from an OpenStack cloud. extends_documentation_fragment: openstack +author: "Monty Taylor (@emonty)" version_added: "2.0" description: - Add or Remove security groups from an OpenStack cloud. @@ -47,8 +48,6 @@ options: - Should the resource be present or absent. choices: [present, absent] default: present - -requirements: ["shade"] ''' EXAMPLES = ''' From dadc1faebd9a177f66f39830d4c65efe9d559870 Mon Sep 17 00:00:00 2001 From: Konstantin Gribov Date: Tue, 2 Jun 2015 16:14:07 +0300 Subject: [PATCH 242/464] Escape spaces, backslashes and ampersands in fstab Fixes #530. It's more generic than #578 which only fixes spaces escaping in name (target dir to mount). Escaping is used in both `set_mount` (important for `src`, `name` and `opts`) and `unset_mount` (for `name`). It's shouldn't be used in `mount` and `umount` since `name` parameter is passed as array element to `module.run_command`. 
Signed-off-by: Konstantin Gribov --- system/mount.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/system/mount.py b/system/mount.py index e11d497220b..d41d1f936e2 100644 --- a/system/mount.py +++ b/system/mount.py @@ -102,6 +102,10 @@ def write_fstab(lines, dest): fs_w.flush() fs_w.close() +def _escape_fstab(v): + """ escape space (040), ampersand (046) and backslash (134) which are invalid in fstab fields """ + return v.replace('\\', '\\134').replace(' ', '\\040').replace('&', '\\046') + def set_mount(**kwargs): """ set/change a mount point location in fstab """ @@ -119,6 +123,7 @@ def set_mount(**kwargs): to_write = [] exists = False changed = False + escaped_args = dict([(k, _escape_fstab(v)) for k, v in args.iteritems()]) for line in open(args['fstab'], 'r').readlines(): if not line.strip(): to_write.append(line) @@ -135,16 +140,16 @@ def set_mount(**kwargs): ld = {} ld['src'], ld['name'], ld['fstype'], ld['opts'], ld['dump'], ld['passno'] = line.split() - if ld['name'] != args['name']: + if ld['name'] != escaped_args['name']: to_write.append(line) continue # it exists - now see if what we have is different exists = True for t in ('src', 'fstype','opts', 'dump', 'passno'): - if ld[t] != args[t]: + if ld[t] != escaped_args[t]: changed = True - ld[t] = args[t] + ld[t] = escaped_args[t] if changed: to_write.append(new_line % ld) @@ -175,6 +180,7 @@ def unset_mount(**kwargs): to_write = [] changed = False + escaped_name = _escape_fstab(args['name']) for line in open(args['fstab'], 'r').readlines(): if not line.strip(): to_write.append(line) @@ -191,7 +197,7 @@ def unset_mount(**kwargs): ld = {} ld['src'], ld['name'], ld['fstype'], ld['opts'], ld['dump'], ld['passno'] = line.split() - if ld['name'] != args['name']: + if ld['name'] != escaped_name: to_write.append(line) continue @@ -260,8 +266,6 @@ def main(): args['passno'] = module.params['passno'] if module.params['opts'] is not None: args['opts'] = module.params['opts'] - if 
' ' in args['opts']: - module.fail_json(msg="unexpected space in 'opts' parameter") if module.params['dump'] is not None: args['dump'] = module.params['dump'] if module.params['fstab'] is not None: From 274abb96b1e28b69868e9ce1df04a66cedca286b Mon Sep 17 00:00:00 2001 From: Philip Kirkland Date: Wed, 17 Jun 2015 14:33:37 +1000 Subject: [PATCH 243/464] adding 'encrypted' option into volume dict so ec2 module can easily include encrypted volumes --- cloud/amazon/ec2.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index 2b38c16ccea..20d49ce5995 100644 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -201,7 +201,7 @@ options: volumes: version_added: "1.5" description: - - a list of volume dicts, each containing device name and optionally ephemeral id or snapshot id. Size and type (and number of iops for io device type) must be specified for a new volume or a root volume, and may be passed for a snapshot volume. For any volume, a volume size less than 1 will be interpreted as a request not to create the volume. + - a list of volume dicts, each containing device name and optionally ephemeral id or snapshot id. Size and type (and number of iops for io device type) must be specified for a new volume or a root volume, and may be passed for a snapshot volume. For any volume, a volume size less than 1 will be interpreted as a request not to create the volume. Encrypt the volume by passing 'encrypted: true' in the volume dict. 
required: false default: null aliases: [] @@ -672,7 +672,8 @@ def create_block_device(module, ec2, volume): size=volume.get('volume_size'), volume_type=volume.get('device_type'), delete_on_termination=volume.get('delete_on_termination', False), - iops=volume.get('iops')) + iops=volume.get('iops'), + encrypted=volume.get('encrypted', False)) def boto_supports_param_in_spot_request(ec2, param): """ From ef22008cac3dae41d7318cc9b110b8c32925ccc4 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Wed, 17 Jun 2015 04:20:42 -0400 Subject: [PATCH 244/464] Return secgroup, not just id --- cloud/openstack/os_security_group.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/cloud/openstack/os_security_group.py b/cloud/openstack/os_security_group.py index a3d1262d6fe..67730a252f1 100644 --- a/cloud/openstack/os_security_group.py +++ b/cloud/openstack/os_security_group.py @@ -117,12 +117,14 @@ def main(): if state == 'present': if not secgroup: secgroup = cloud.create_security_group(name, description) - module.exit_json(changed=True, id=secgroup['id']) + module.exit_json( + changed=True, id=secgroup.id, secgroup=secgroup) else: if _needs_update(module, secgroup): secgroup = cloud.update_security_group( secgroup['id'], description=description) - module.exit_json(changed=True, id=secgroup['id']) + module.exit_json( + changed=True, id=secgroup.id, secgroup=secgroup) else: module.exit_json(changed=False) From 5b84b0d136c75ca964d1fcbb8ddc2359a98f9cfe Mon Sep 17 00:00:00 2001 From: acaveroc Date: Wed, 17 Jun 2015 10:37:47 +0200 Subject: [PATCH 245/464] Add port definition support for mysql_vars module --- database/mysql/mysql_variables.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/database/mysql/mysql_variables.py b/database/mysql/mysql_variables.py index 0b0face0328..753d37433e3 100644 --- a/database/mysql/mysql_variables.py +++ b/database/mysql/mysql_variables.py @@ -52,6 +52,10 @@ options: description: - mysql host to 
connect required: False + login_port: + description: + - mysql port to connect + required: False login_unix_socket: description: - unix socket to connect mysql server @@ -194,6 +198,7 @@ def main(): login_user=dict(default=None), login_password=dict(default=None), login_host=dict(default="localhost"), + login_port=dict(default="3306"), login_unix_socket=dict(default=None), variable=dict(default=None), value=dict(default=None) @@ -203,6 +208,7 @@ def main(): user = module.params["login_user"] password = module.params["login_password"] host = module.params["login_host"] + port = module.params["login_port"] mysqlvar = module.params["variable"] value = module.params["value"] if not mysqldb_found: @@ -227,9 +233,9 @@ def main(): module.fail_json(msg="when supplying login arguments, both login_user and login_password must be provided") try: if module.params["login_unix_socket"]: - db_connection = MySQLdb.connect(host=module.params["login_host"], unix_socket=module.params["login_unix_socket"], user=login_user, passwd=login_password, db="mysql") + db_connection = MySQLdb.connect(host=module.params["login_host"], port=module.params["login_port"], unix_socket=module.params["login_unix_socket"], user=login_user, passwd=login_password, db="mysql") else: - db_connection = MySQLdb.connect(host=module.params["login_host"], user=login_user, passwd=login_password, db="mysql") + db_connection = MySQLdb.connect(host=module.params["login_host"], port=module.params["login_port"], user=login_user, passwd=login_password, db="mysql") cursor = db_connection.cursor() except Exception, e: module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or ~/.my.cnf has the credentials") From 5609d1dc5a480c89ef7e419377f8d2e27674e899 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Wed, 17 Jun 2015 05:24:08 -0400 Subject: [PATCH 246/464] Return resource objects from OpenStack modules It's not uncommon for people to want to do additional things after 
creating a module. Also, add a note about it to the dev notes. --- cloud/openstack/README.md | 7 +++++++ cloud/openstack/os_image.py | 12 ++++++------ cloud/openstack/os_network.py | 8 +++----- cloud/openstack/os_object.py | 2 +- cloud/openstack/os_server.py | 3 ++- cloud/openstack/os_subnet.py | 12 +++++++----- cloud/openstack/os_volume.py | 8 ++++---- 7 files changed, 30 insertions(+), 22 deletions(-) diff --git a/cloud/openstack/README.md b/cloud/openstack/README.md index a9b22234add..4a872b11954 100644 --- a/cloud/openstack/README.md +++ b/cloud/openstack/README.md @@ -21,6 +21,13 @@ Naming * If the module is one that a cloud admin and a cloud consumer could both use, the cloud consumer rules apply. +Interface +--------- + +* If the resource being managed has an id, it should be returned. +* If the resource being managed has an associated object more complex than + an id, it should also be returned. + Interoperability ---------------- diff --git a/cloud/openstack/os_image.py b/cloud/openstack/os_image.py index 115a3f2b4f8..4687ce5e972 100644 --- a/cloud/openstack/os_image.py +++ b/cloud/openstack/os_image.py @@ -148,7 +148,7 @@ def main(): if module.params['state'] == 'present': if not image: - result = cloud.create_image( + image = cloud.create_image( name=module.params['name'], filename=module.params['filename'], disk_format=module.params['disk_format'], @@ -158,26 +158,26 @@ def main(): ) changed = True if not module.params['wait']: - module.exit_json(changed=changed, result=result) - image = cloud.get_image(name_or_id=result['id']) + module.exit_json(changed=changed, image=image, id=image.id) cloud.update_image_properties( image=image, kernel=module.params['kernel'], ramdisk=module.params['ramdisk'], **module.params['properties']) + image = cloud.get_image(name_or_id=image.id) + module.exit_json(changed=changed, image=image, id=image.id) elif module.params['state'] == 'absent': if not image: - module.exit_json(changed=False, result="success") + changed = 
False else: cloud.delete_image( name_or_id=module.params['name'], wait=module.params['wait'], timeout=module.params['timeout']) changed = True - - module.exit_json(changed=changed, id=image.id, result="success") + module.exit_json(changed=changed) except shade.OpenStackCloudException as e: module.fail_json(msg=e.message, extra_data=e.extra_data) diff --git a/cloud/openstack/os_network.py b/cloud/openstack/os_network.py index 9c6174462f7..75c431493f6 100644 --- a/cloud/openstack/os_network.py +++ b/cloud/openstack/os_network.py @@ -88,16 +88,14 @@ def main(): if state == 'present': if not net: net = cloud.create_network(name, shared, admin_state_up) - module.exit_json(changed=True, result="Created", id=net['id']) - else: - module.exit_json(changed=False, result="Success", id=net['id']) + module.exit_json(changed=False, network=net, id=net['id']) elif state == 'absent': if not net: - module.exit_json(changed=False, result="Success") + module.exit_json(changed=False) else: cloud.delete_network(name) - module.exit_json(changed=True, result="Deleted") + module.exit_json(changed=True) except shade.OpenStackCloudException as e: module.fail_json(msg=e.message) diff --git a/cloud/openstack/os_object.py b/cloud/openstack/os_object.py index a0eaaeef600..a009d913a8a 100644 --- a/cloud/openstack/os_object.py +++ b/cloud/openstack/os_object.py @@ -115,7 +115,7 @@ def main(): changed = process_object(cloud, **module.params) - module.exit_json(changed=changed, result="success") + module.exit_json(changed=changed) except shade.OpenStackCloudException as e: module.fail_json(msg=e.message) diff --git a/cloud/openstack/os_server.py b/cloud/openstack/os_server.py index 932cebcc1c3..78a46f78c04 100644 --- a/cloud/openstack/os_server.py +++ b/cloud/openstack/os_server.py @@ -241,7 +241,8 @@ EXAMPLES = ''' def _exit_hostvars(module, cloud, server, changed=True): hostvars = meta.get_hostvars_from_server(cloud, server) - module.exit_json(changed=changed, id=server.id, openstack=hostvars) + 
module.exit_json( + changed=changed, server=server, id=server.id, openstack=hostvars) def _network_args(module, cloud): diff --git a/cloud/openstack/os_subnet.py b/cloud/openstack/os_subnet.py index 54672b35ffb..f96ce9fd633 100644 --- a/cloud/openstack/os_subnet.py +++ b/cloud/openstack/os_subnet.py @@ -227,7 +227,7 @@ def main(): dns_nameservers=dns, allocation_pools=pool, host_routes=host_routes) - module.exit_json(changed=True, result="created") + changed = True else: if _needs_update(subnet, module): cloud.update_subnet(subnet['id'], @@ -237,16 +237,18 @@ def main(): dns_nameservers=dns, allocation_pools=pool, host_routes=host_routes) - module.exit_json(changed=True, result="updated") + changed = True else: - module.exit_json(changed=False, result="success") + changed = False + module.exit_json(changed=changed) elif state == 'absent': if not subnet: - module.exit_json(changed=False, result="success") + changed = False else: + changed = True cloud.delete_subnet(subnet_name) - module.exit_json(changed=True, result="deleted") + module.exit_json(changed=changed) except shade.OpenStackCloudException as e: module.fail_json(msg=e.message) diff --git a/cloud/openstack/os_volume.py b/cloud/openstack/os_volume.py index 87b7d9eab54..80ad2adcbb2 100644 --- a/cloud/openstack/os_volume.py +++ b/cloud/openstack/os_volume.py @@ -90,7 +90,7 @@ EXAMPLES = ''' def _present_volume(module, cloud): if cloud.volume_exists(module.params['display_name']): v = cloud.get_volume(module.params['display_name']) - module.exit_json(changed=False, id=v['id']) + module.exit_json(changed=False, id=v['id'], volume=v) volume_args = dict( size=module.params['size'], @@ -107,7 +107,7 @@ def _present_volume(module, cloud): volume = cloud.create_volume( wait=module.params['wait'], timeout=module.params['timeout'], **volume_args) - module.exit_json(changed=True, id=volume['id']) + module.exit_json(changed=True, id=volume['id'], volume=volume) def _absent_volume(module, cloud): @@ -117,8 +117,8 @@ def 
_absent_volume(module, cloud): wait=module.params['wait'], timeout=module.params['timeout']) except shade.OpenStackCloudTimeout: - module.exit_json(changed=False, result="Volume deletion timed-out") - module.exit_json(changed=True, result='Volume Deleted') + module.exit_json(changed=False) + module.exit_json(changed=True) def main(): From 7d2a5965bd5fc95196a3d3427acaa5ca086e4e80 Mon Sep 17 00:00:00 2001 From: acaveroc Date: Wed, 17 Jun 2015 13:53:08 +0200 Subject: [PATCH 247/464] Assorted minor bug fixes - Modified data type for port definition from string to integer - Modified login_host default value for compatibilize with port definition according with MySQL Documentation (https://dev.mysql.com/doc/refman/5.0/en/connecting.html) --- database/mysql/mysql_variables.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/database/mysql/mysql_variables.py b/database/mysql/mysql_variables.py index 753d37433e3..36415df5460 100644 --- a/database/mysql/mysql_variables.py +++ b/database/mysql/mysql_variables.py @@ -197,7 +197,7 @@ def main(): argument_spec = dict( login_user=dict(default=None), login_password=dict(default=None), - login_host=dict(default="localhost"), + login_host=dict(default="127.0.0.1"), login_port=dict(default="3306"), login_unix_socket=dict(default=None), variable=dict(default=None), @@ -233,9 +233,9 @@ def main(): module.fail_json(msg="when supplying login arguments, both login_user and login_password must be provided") try: if module.params["login_unix_socket"]: - db_connection = MySQLdb.connect(host=module.params["login_host"], port=module.params["login_port"], unix_socket=module.params["login_unix_socket"], user=login_user, passwd=login_password, db="mysql") + db_connection = MySQLdb.connect(host=module.params["login_host"], port=int(module.params["login_port"]), unix_socket=module.params["login_unix_socket"], user=login_user, passwd=login_password, db="mysql") else: - db_connection = 
MySQLdb.connect(host=module.params["login_host"], port=module.params["login_port"], user=login_user, passwd=login_password, db="mysql") + db_connection = MySQLdb.connect(host=module.params["login_host"], port=int(module.params["login_port"]), user=login_user, passwd=login_password, db="mysql") cursor = db_connection.cursor() except Exception, e: module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or ~/.my.cnf has the credentials") From 1a11f07d2957567dfd95e59bfaaa966b8785b4f2 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Wed, 17 Jun 2015 08:02:34 -0400 Subject: [PATCH 248/464] Return the secgroup for all present states --- cloud/openstack/os_security_group.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/cloud/openstack/os_security_group.py b/cloud/openstack/os_security_group.py index 67730a252f1..268b2563d33 100644 --- a/cloud/openstack/os_security_group.py +++ b/cloud/openstack/os_security_group.py @@ -115,18 +115,17 @@ def main(): module.exit_json(changed=_system_state_change(module, secgroup)) if state == 'present': + changed = False if not secgroup: secgroup = cloud.create_security_group(name, description) - module.exit_json( - changed=True, id=secgroup.id, secgroup=secgroup) + changed = True else: if _needs_update(module, secgroup): secgroup = cloud.update_security_group( secgroup['id'], description=description) - module.exit_json( - changed=True, id=secgroup.id, secgroup=secgroup) - else: - module.exit_json(changed=False) + changed = True + module.exit_json( + changed=True, id=secgroup.id, secgroup=secgroup) if state == 'absent': if not secgroup: From d8c51a67f3d0ef86ca6b7b43f8b0e1556e62e520 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Wed, 17 Jun 2015 08:47:14 -0400 Subject: [PATCH 249/464] Actually use changed variable --- cloud/openstack/os_security_group.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git 
a/cloud/openstack/os_security_group.py b/cloud/openstack/os_security_group.py index 268b2563d33..51e7df772a1 100644 --- a/cloud/openstack/os_security_group.py +++ b/cloud/openstack/os_security_group.py @@ -114,8 +114,8 @@ def main(): if module.check_mode: module.exit_json(changed=_system_state_change(module, secgroup)) + changed = False if state == 'present': - changed = False if not secgroup: secgroup = cloud.create_security_group(name, description) changed = True @@ -125,14 +125,13 @@ def main(): secgroup['id'], description=description) changed = True module.exit_json( - changed=True, id=secgroup.id, secgroup=secgroup) + changed=changed, id=secgroup.id, secgroup=secgroup) if state == 'absent': - if not secgroup: - module.exit_json(changed=False) - else: + if secgroup: cloud.delete_security_group(secgroup['id']) - module.exit_json(changed=True) + changed=True + module.exit_json(changed=changed) except shade.OpenStackCloudException as e: module.fail_json(msg=e.message) From 64fe01178f0bb71acf714ea3902b6077c53f7349 Mon Sep 17 00:00:00 2001 From: Robin Roth Date: Wed, 17 Jun 2015 18:01:35 +0200 Subject: [PATCH 250/464] add comment: ports need to be exposed. port mapping with this module only works for ports that are exposed either in the Dockerfile or via an additional arguments. This is different from the command line docker client, that is willing to also map ports that are not exposed. This comments makes the behaviour more obvious. --- cloud/docker/docker.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 44ed3ecf038..2d33424d8af 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -62,7 +62,8 @@ options: - List containing private to public port mapping specification. Use docker - 'CLI-style syntax: C(8000), C(9000:8000), or C(0.0.0.0:9000:8000)' - where 8000 is a container port, 9000 is a host port, and 0.0.0.0 is - - a host interface. + - a host interface. 
The container ports need to be exposed either in the + - Dockerfile or via the next option. default: null version_added: "1.5" expose: From 19f60997776364d6bbccde62c03a63323d06b1de Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 17 Jun 2015 12:39:14 -0400 Subject: [PATCH 251/464] minor doc fixes --- cloud/docker/docker.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 2d33424d8af..f9e96c21eb2 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -59,11 +59,10 @@ options: version_added: "1.5" ports: description: - - List containing private to public port mapping specification. Use docker - - 'CLI-style syntax: C(8000), C(9000:8000), or C(0.0.0.0:9000:8000)' - - where 8000 is a container port, 9000 is a host port, and 0.0.0.0 is - - a host interface. The container ports need to be exposed either in the - - Dockerfile or via the next option. + - "List containing private to public port mapping specification. + Use docker 'CLI-style syntax: C(8000), C(9000:8000), or C(0.0.0.0:9000:8000)' + where 8000 is a container port, 9000 is a host port, and 0.0.0.0 is - a host interface. + The container ports need to be exposed either in the Dockerfile or via the C(expose) option." 
default: null version_added: "1.5" expose: From e7876df99f7e68467c0c6da0939fb5ba07f9ee14 Mon Sep 17 00:00:00 2001 From: acaveroc Date: Thu, 18 Jun 2015 09:43:32 +0200 Subject: [PATCH 252/464] Add version_added and type of parameter --- database/mysql/mysql_variables.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/database/mysql/mysql_variables.py b/database/mysql/mysql_variables.py index 36415df5460..f50ed740539 100644 --- a/database/mysql/mysql_variables.py +++ b/database/mysql/mysql_variables.py @@ -53,6 +53,7 @@ options: - mysql host to connect required: False login_port: + version_added: "1.9" description: - mysql port to connect required: False @@ -198,7 +199,7 @@ def main(): login_user=dict(default=None), login_password=dict(default=None), login_host=dict(default="127.0.0.1"), - login_port=dict(default="3306"), + login_port=dict(default="3306", type='int'), login_unix_socket=dict(default=None), variable=dict(default=None), value=dict(default=None) @@ -233,9 +234,9 @@ def main(): module.fail_json(msg="when supplying login arguments, both login_user and login_password must be provided") try: if module.params["login_unix_socket"]: - db_connection = MySQLdb.connect(host=module.params["login_host"], port=int(module.params["login_port"]), unix_socket=module.params["login_unix_socket"], user=login_user, passwd=login_password, db="mysql") + db_connection = MySQLdb.connect(host=module.params["login_host"], port=module.params["login_port"], unix_socket=module.params["login_unix_socket"], user=login_user, passwd=login_password, db="mysql") else: - db_connection = MySQLdb.connect(host=module.params["login_host"], port=int(module.params["login_port"]), user=login_user, passwd=login_password, db="mysql") + db_connection = MySQLdb.connect(host=module.params["login_host"], port=module.params["login_port"], user=login_user, passwd=login_password, db="mysql") cursor = db_connection.cursor() except Exception, e: module.fail_json(msg="unable to 
connect to database, check login_user and login_password are correct or ~/.my.cnf has the credentials") From f4a86aab7fa85c470e3232dd17bb226501d300dd Mon Sep 17 00:00:00 2001 From: Vyronas Tsingaras Date: Thu, 18 Jun 2015 13:55:54 +0300 Subject: [PATCH 253/464] Use either esxi:dc,host OR cluster,resource_pool vsphere_guest now can deploy a template using a datacenter and hostname as the target, instead of requiring a cluster and resource_pool. This commit fixes #951. Signed-off-by: Vyronas Tsingaras --- cloud/vmware/vsphere_guest.py | 82 ++++++++++++++++++++++++++++------- 1 file changed, 67 insertions(+), 15 deletions(-) diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index 7c41e6e4d1d..4277b250ffa 100644 --- a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -526,22 +526,74 @@ def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, mo vmTemplate = vsphere_client.get_vm_by_name(template_src) vmTarget = None - try: - cluster = [k for k, - v in vsphere_client.get_clusters().items() if v == cluster_name][0] - except IndexError, e: - vsphere_client.disconnect() - module.fail_json(msg="Cannot find Cluster named: %s" % - cluster_name) + if esxi: + datacenter = esxi['datacenter'] + esxi_hostname = esxi['hostname'] + + # Datacenter managed object reference + dclist = [k for k, + v in vsphere_client.get_datacenters().items() if v == datacenter] + if dclist: + dcmor=dclist[0] + else: + vsphere_client.disconnect() + module.fail_json(msg="Cannot find datacenter named: %s" % datacenter) - try: - rpmor = [k for k, v in vsphere_client.get_resource_pools( - from_mor=cluster).items() - if v == resource_pool][0] - except IndexError, e: - vsphere_client.disconnect() - module.fail_json(msg="Cannot find Resource Pool named: %s" % - resource_pool) + dcprops = VIProperty(vsphere_client, dcmor) + + # hostFolder managed reference + hfmor = dcprops.hostFolder._obj + + # Grab the computerResource name and host properties + 
crmors = vsphere_client._retrieve_properties_traversal( + property_names=['name', 'host'], + from_node=hfmor, + obj_type='ComputeResource') + + # Grab the host managed object reference of the esxi_hostname + try: + hostmor = [k for k, + v in vsphere_client.get_hosts().items() if v == esxi_hostname][0] + except IndexError, e: + vsphere_client.disconnect() + module.fail_json(msg="Cannot find esx host named: %s" % esxi_hostname) + + # Grab the computeResource managed object reference of the host we are + # creating the VM on. + crmor = None + for cr in crmors: + if crmor: + break + for p in cr.PropSet: + if p.Name == "host": + for h in p.Val.get_element_ManagedObjectReference(): + if h == hostmor: + crmor = cr.Obj + break + if crmor: + break + crprops = VIProperty(vsphere_client, crmor) + + rpmor = crprops.resourcePool._obj + elif resource_pool: + try: + cluster = [k for k, + v in vsphere_client.get_clusters().items() if v == cluster_name][0] + except IndexError, e: + vsphere_client.disconnect() + module.fail_json(msg="Cannot find Cluster named: %s" % + cluster_name) + + try: + rpmor = [k for k, v in vsphere_client.get_resource_pools( + from_mor=cluster).items() + if v == resource_pool][0] + except IndexError, e: + vsphere_client.disconnect() + module.fail_json(msg="Cannot find Resource Pool named: %s" % + resource_pool) + else: + module.fail_json(msg="You need to specify either esxi:[datacenter,hostname] or [cluster,resource_pool]") try: vmTarget = vsphere_client.get_vm_by_name(guest) From 5e72c5ad3669d6796da8c3440d8b14daf8ef3aa2 Mon Sep 17 00:00:00 2001 From: Vyronas Tsingaras Date: Thu, 18 Jun 2015 14:23:43 +0300 Subject: [PATCH 254/464] Properly check if vm_hardware contains keys This commit fixes #1475 Signed-off-by: Vyronas Tsingaras --- cloud/vmware/vsphere_guest.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index 7c41e6e4d1d..3142936062d 100644 --- 
a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -577,7 +577,7 @@ def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool, cluster_name cpuHotRemoveEnabled = bool(vm.properties.config.cpuHotRemoveEnabled) # Change Memory - if vm_hardware['memory_mb']: + if 'memory_mb' in vm_hardware: if int(vm_hardware['memory_mb']) != vm.properties.config.hardware.memoryMB: spec = spec_singleton(spec, request, vm) @@ -607,7 +607,7 @@ def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool, cluster_name changes['memory'] = vm_hardware['memory_mb'] # ====( Config Memory )====# - if vm_hardware['num_cpus']: + if 'num_cpus' in vm_hardware: if int(vm_hardware['num_cpus']) != vm.properties.config.hardware.numCPU: spec = spec_singleton(spec, request, vm) From 759c2de7f98b3bf0979cafb804df982d27dcf5fd Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Thu, 18 Jun 2015 07:56:50 -0400 Subject: [PATCH 255/464] Add filter ability --- cloud/openstack/os_client_config.py | 23 ++++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/cloud/openstack/os_client_config.py b/cloud/openstack/os_client_config.py index 100608b0fd0..a12cd8fe65a 100644 --- a/cloud/openstack/os_client_config.py +++ b/cloud/openstack/os_client_config.py @@ -25,6 +25,15 @@ short_description: Get OpenStack Client config description: - Get I(openstack) client config data from clouds.yaml or environment version_added: "2.0" +notes: + - Facts are placed in the C(openstack.clouds) variable. +options: + clouds: + description: + - List of clouds to limit the return list to. 
No value means return + information on all configured clouds + required: false + default: [] requirements: [ os-client-config ] author: "Monty Taylor (@emonty)" ''' @@ -34,19 +43,27 @@ EXAMPLES = ''' - os-client-config: - debug: var={{ item }} with_items: "{{ openstack.clouds|rejectattr('secgroup_source', 'none')|list() }}" + +# Get the information back just about the mordred cloud +- os-client-config: + clouds: + - mordred ''' def main(): - module = AnsibleModule({}) + module = AnsibleModule({ + clouds=dict(required=False, default=[]), + }) p = module.params try: config = os_client_config.OpenStackConfig() clouds = [] for cloud in config.get_all_clouds(): - cloud.config['name'] = cloud.name - clouds.append(cloud.config) + if not module.params['clouds'] or cloud.name in module.param['clouds']: + cloud.config['name'] = cloud.name + clouds.append(cloud.config) module.exit_json(ansible_facts=dict(openstack=dict(clouds=clouds))) except exceptions.OpenStackConfigException as e: module.fail_json(msg=str(e)) From a226701efe836e3c288a1624dfd820928dcd0c16 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Tue, 31 Mar 2015 16:37:07 -0400 Subject: [PATCH 256/464] Add OpenStack Keypair module Also deprecate old nova_keypair module. --- .../{nova_keypair.py => _nova_keypair.py} | 1 + cloud/openstack/os_keypair.py | 140 ++++++++++++++++++ 2 files changed, 141 insertions(+) rename cloud/openstack/{nova_keypair.py => _nova_keypair.py} (99%) create mode 100644 cloud/openstack/os_keypair.py diff --git a/cloud/openstack/nova_keypair.py b/cloud/openstack/_nova_keypair.py similarity index 99% rename from cloud/openstack/nova_keypair.py rename to cloud/openstack/_nova_keypair.py index b2e38ff7db9..68df0c5a2c4 100644 --- a/cloud/openstack/nova_keypair.py +++ b/cloud/openstack/_nova_keypair.py @@ -32,6 +32,7 @@ version_added: "1.2" author: - "Benno Joy (@bennojoy)" - "Michael DeHaan" +deprecated: Deprecated in 2.0. 
Use os_keypair instead short_description: Add/Delete key pair from nova description: - Add or Remove key pair from nova . diff --git a/cloud/openstack/os_keypair.py b/cloud/openstack/os_keypair.py new file mode 100644 index 00000000000..c4725552725 --- /dev/null +++ b/cloud/openstack/os_keypair.py @@ -0,0 +1,140 @@ +#!/usr/bin/python + +# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. +# Copyright (c) 2013, Benno Joy +# Copyright (c) 2013, John Dewey +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . + + +try: + import shade + HAS_SHADE = True +except ImportError: + HAS_SHADE = False + + +DOCUMENTATION = ''' +--- +module: os_keypair +short_description: Add/Delete a keypair from OpenStack +extends_documentation_fragment: openstack +version_added: "2.0" +description: + - Add or Remove key pair from OpenStack +options: + name: + description: + - Name that has to be given to the key pair + required: true + default: None + public_key: + description: + - The public key that would be uploaded to nova and injected to vm's upon creation + required: false + default: None + public_key_file: + description: + - Path to local file containing ssh public key. Mutually exclusive with public_key + required: false + default: None + state: + description: + - Should the resource be present or absent. 
+ choices: [present, absent] + default: present +requirements: ["shade"] +''' + +EXAMPLES = ''' +# Creates a key pair with the running users public key +- os_keypair: + cloud: mordred + state: present + name: ansible_key + public_key: "{{ lookup('file','~/.ssh/id_rsa.pub') }}" + +# Creates a new key pair and the private key returned after the run. +- os_keypair: + cloud: rax-dfw + state: present + name: ansible_key +''' + + +def main(): + argument_spec = openstack_full_argument_spec( + name = dict(required=True), + public_key = dict(default=None), + public_key_file = dict(default=None), + state = dict(default='present', choices=['absent', 'present']), + ) + module_kwargs = openstack_module_kwargs( + mutually_exclusive=[['public_key', 'public_key_file']]) + module = AnsibleModule(argument_spec, **module_kwargs) + + if module.params['public_key_file']: + public_key = open(module.params['public_key_file']).read() + else: + public_key = module.params['public_key'] + + if not HAS_SHADE: + module.fail_json(msg='shade is required for this module') + + state = module.params['state'] + name = module.params['name'] + public_key = module.params['public_key'] + + try: + cloud = shade.openstack_cloud(**module.params) + + if state == 'present': + for key in cloud.list_keypairs(): + if key.name == name: + if public_key and (public_key != key.public_key): + module.fail_json( + msg="Key name %s present but key hash not the same" + " as offered. Delete key first." 
% key.name + ) + else: + module.exit_json(changed=False, result="Key present") + try: + key = cloud.create_keypair(name, public_key) + except Exception, e: + module.exit_json( + msg="Error in creating the keypair: %s" % e.message + ) + if not public_key: + module.exit_json(changed=True, key=key.private_key) + module.exit_json(changed=True, key=None) + + elif state == 'absent': + for key in cloud.list_keypairs(): + if key.name == name: + try: + cloud.delete_keypair(name) + except Exception, e: + module.fail_json( + msg="Keypair deletion has failed: %s" % e.message + ) + module.exit_json(changed=True, result="deleted") + module.exit_json(changed=False, result="not present") + + except shade.OpenStackCloudException as e: + module.fail_json(msg=e.message) + +# this is magic, see lib/ansible/module_common.py +from ansible.module_utils.basic import * +from ansible.module_utils.openstack import * +main() From 82dc5c4394ab88e055debed6b0d7d397f11638d7 Mon Sep 17 00:00:00 2001 From: Davide Guerri Date: Thu, 4 Jun 2015 19:30:34 +0100 Subject: [PATCH 257/464] Avoind using lookup() in documentation lookup() is currently broken (current Ansible devel branch), so better to avoid it in our examples. --- cloud/openstack/os_keypair.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/openstack/os_keypair.py b/cloud/openstack/os_keypair.py index c4725552725..c6794b47826 100644 --- a/cloud/openstack/os_keypair.py +++ b/cloud/openstack/os_keypair.py @@ -63,7 +63,7 @@ EXAMPLES = ''' cloud: mordred state: present name: ansible_key - public_key: "{{ lookup('file','~/.ssh/id_rsa.pub') }}" + public_key_file: ~/.ssh/id_rsa.pub # Creates a new key pair and the private key returned after the run. 
- os_keypair: From 02d0a73906bcd6e1c8805825a23b49df027c65a9 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Thu, 18 Jun 2015 07:59:32 -0400 Subject: [PATCH 258/464] Move the order of argument processing --- cloud/openstack/os_keypair.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/cloud/openstack/os_keypair.py b/cloud/openstack/os_keypair.py index c6794b47826..b404e6cc02a 100644 --- a/cloud/openstack/os_keypair.py +++ b/cloud/openstack/os_keypair.py @@ -84,18 +84,16 @@ def main(): mutually_exclusive=[['public_key', 'public_key_file']]) module = AnsibleModule(argument_spec, **module_kwargs) + state = module.params['state'] + name = module.params['name'] + public_key = module.params['public_key'] + if module.params['public_key_file']: public_key = open(module.params['public_key_file']).read() - else: - public_key = module.params['public_key'] if not HAS_SHADE: module.fail_json(msg='shade is required for this module') - state = module.params['state'] - name = module.params['name'] - public_key = module.params['public_key'] - try: cloud = shade.openstack_cloud(**module.params) From 81528e68343b890408bc3598b96d97f99a84ecb0 Mon Sep 17 00:00:00 2001 From: Ritesh Khadgaray Date: Thu, 18 Jun 2015 20:38:19 +0530 Subject: [PATCH 259/464] Add the ability to grep for content --- files/find.py | 32 +++++++++++++++++++++++++++++--- 1 file changed, 29 insertions(+), 3 deletions(-) diff --git a/files/find.py b/files/find.py index d5441aad273..659ec16026e 100644 --- a/files/find.py +++ b/files/find.py @@ -53,6 +53,11 @@ options: - One or more (shell type) file glob patterns, which restrict the list of files to be returned to those whose basenames match at least one of the patterns specified. Multiple patterns can be specified using a list. 
+ contains: + required: false + default: null + description: + - One or more re patterns which should be matched against the file content paths: required: true aliases: [ "name" ] @@ -96,7 +101,7 @@ options: default: "False" choices: [ True, False ] description: - - Set this to true to follow symlinks in path. + - Set this to true to follow symlinks in path for systems with python 2.6+ get_checksum: required: false default: "False" @@ -177,6 +182,23 @@ def sizefilter(st, size): return False +def contentfilter(fsname, pattern): + '''filter files which contain the given expression''' + if pattern is None: return True + + try: + f = open(fsname) + prog = re.compile(pattern) + for line in f: + if prog.match (line): + f.close() + return True + + f.close() + except: + pass + + return False def statinfo(st): return { @@ -216,6 +238,7 @@ def main(): argument_spec = dict( paths = dict(required=True, aliases=['name'], type='list'), patterns = dict(default=['*'], type='list'), + contains = dict(default=None, type='str'), file_type = dict(default="file", choices=['file', 'directory'], type='str'), age = dict(default=None, type='str'), age_stamp = dict(default="mtime", choices=['atime','mtime','ctime'], type='str'), @@ -258,8 +281,10 @@ def main(): looked = 0 for npath in params['paths']: if os.path.isdir(npath): - for root,dirs,files in os.walk( npath, followlinks=params['follow'] ): + ''' ignore followlinks for python version < 2.6 ''' + for root,dirs,files in (sys.version_info < (2,6,0) and os.walk(npath)) or \ + os.walk( npath, followlinks=params['follow']): looked = looked + len(files) + len(dirs) for fsobj in (files + dirs): fsname=os.path.normpath(os.path.join(root, fsobj)) @@ -278,7 +303,8 @@ def main(): elif stat.S_ISREG(st.st_mode) and params['file_type'] == 'file': if pfilter(fsobj, params['patterns']) and \ agefilter(st, now, age, params['age_stamp']) and \ - sizefilter(st, size): + sizefilter(st, size) and \ + contentfilter(fsname, params['contains']): 
r.update(statinfo(st)) if params['get_checksum']: From 96d2c09457bf7976a110124d65c6db717def798b Mon Sep 17 00:00:00 2001 From: Colin Hutchinson Date: Thu, 18 Jun 2015 11:48:27 -0400 Subject: [PATCH 260/464] Indentation fix for with and register in the ec2_vol example --- cloud/amazon/ec2_vol.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/amazon/ec2_vol.py b/cloud/amazon/ec2_vol.py index 3065b550457..02bb540773f 100644 --- a/cloud/amazon/ec2_vol.py +++ b/cloud/amazon/ec2_vol.py @@ -160,8 +160,8 @@ EXAMPLES = ''' instance: "{{ item.id }}" name: my_existing_volume_Name_tag device_name: /dev/xvdf - with_items: ec2.instances - register: ec2_vol + with_items: ec2.instances + register: ec2_vol # Remove a volume - ec2_vol: From fbbc74d5fe6beb7dadba5b5bad9362914fe99109 Mon Sep 17 00:00:00 2001 From: Benno Joy Date: Thu, 18 Jun 2015 21:24:22 +0530 Subject: [PATCH 261/464] enable the module to provision windows instances --- cloud/azure/azure.py | 81 ++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 74 insertions(+), 7 deletions(-) diff --git a/cloud/azure/azure.py b/cloud/azure/azure.py index 98984dfb9e6..59f425399cc 100644 --- a/cloud/azure/azure.py +++ b/cloud/azure/azure.py @@ -110,6 +110,34 @@ options: required: false default: 'present' aliases: [] + reset_pass_atlogon: + description: + - Reset the admin password on first logon for windows hosts + required: false + default: "no" + version_added: "2.0" + choices: [ "yes", "no" ] + auto_updates: + description: + - Enable Auto Updates on Windows Machines + required: false + version_added: "2.0" + default: "no" + choices: [ "yes", "no" ] + enable_winrm: + description: + - Enable winrm on Windows Machines + required: false + version_added: "2.0" + default: "yes" + choices: [ "yes", "no" ] + os_type: + description: + - The type of the os that is gettings provisioned + required: false + version_added: "2.0" + default: "linux" + choices: [ "windows", "linux" ] requirements: - "python >= 
2.6" @@ -138,6 +166,29 @@ EXAMPLES = ''' module: azure name: my-virtual-machine state: absent + +#Create windows machine +- hosts: all + connection: local + tasks: + - local_action: + module: azure + name: "ben-Winows-23" + hostname: "win123" + os_type: windows + enable_winrm: yes + subscription_id: "{{ azure_sub_id }}" + management_cert_path: "{{ azure_cert_path }}" + role_size: Small + image: 'bd507d3a70934695bc2128e3e5a255ba__RightImage-Windows-2012-x64-v13.5' + location: 'East Asia' + password: "xxx" + storage_account: benooytes + user: admin + wait: yes + virtual_network_name: "{{ vnet_name }}" + + ''' import base64 @@ -196,7 +247,7 @@ try: from azure import WindowsAzureError, WindowsAzureMissingResourceError from azure.servicemanagement import (ServiceManagementService, OSVirtualHardDisk, SSH, PublicKeys, PublicKey, LinuxConfigurationSet, ConfigurationSetInputEndpoints, - ConfigurationSetInputEndpoint) + ConfigurationSetInputEndpoint, Listener, WindowsConfigurationSet) HAS_AZURE = True except ImportError: HAS_AZURE = False @@ -264,6 +315,7 @@ def create_virtual_machine(module, azure): True if a new virtual machine and/or cloud service was created, false otherwise """ name = module.params.get('name') + os_type = module.params.get('os_type') hostname = module.params.get('hostname') or name + ".cloudapp.net" endpoints = module.params.get('endpoints').split(',') ssh_cert_path = module.params.get('ssh_cert_path') @@ -295,10 +347,21 @@ def create_virtual_machine(module, azure): azure.get_role(name, name, name) except WindowsAzureMissingResourceError: # vm does not exist; create it - - # Create linux configuration - disable_ssh_password_authentication = not password - linux_config = LinuxConfigurationSet(hostname, user, password, disable_ssh_password_authentication) + + if os_type == 'linux': + # Create linux configuration + disable_ssh_password_authentication = not password + vm_config = LinuxConfigurationSet(hostname, user, password, 
disable_ssh_password_authentication) + else: + #Create Windows Config + vm_config = WindowsConfigurationSet(hostname, password, module.params.get('reset_pass_atlogon'),\ + module.params.get('auto_updates'), None, user) + vm_config.domain_join = None + if module.params.get('enable_winrm'): + listener = Listener('Http') + vm_config.win_rm.listeners.listeners.append(listener) + else: + vm_config.win_rm = None # Add ssh certificates if specified if ssh_cert_path: @@ -340,7 +403,7 @@ def create_virtual_machine(module, azure): deployment_slot='production', label=name, role_name=name, - system_config=linux_config, + system_config=vm_config, network_config=network_config, os_virtual_hard_disk=os_hd, role_size=role_size, @@ -448,6 +511,7 @@ def main(): ssh_cert_path=dict(), name=dict(), hostname=dict(), + os_type=dict(default='linux', choices=['linux', 'windows']), location=dict(choices=AZURE_LOCATIONS), role_size=dict(choices=AZURE_ROLE_SIZES), subscription_id=dict(no_log=True), @@ -461,7 +525,10 @@ def main(): state=dict(default='present'), wait=dict(type='bool', default=False), wait_timeout=dict(default=600), - wait_timeout_redirects=dict(default=300) + wait_timeout_redirects=dict(default=300), + reset_pass_atlogon=dict(type='bool', default=False), + auto_updates=dict(type='bool', default=False), + enable_winrm=dict(type='bool', default=True), ) ) if not HAS_AZURE: From 762f4f25f75ba9be0ad607ae3b42a781aa4989c5 Mon Sep 17 00:00:00 2001 From: Benno Joy Date: Thu, 18 Jun 2015 23:00:59 +0530 Subject: [PATCH 262/464] enable azure to provision windows instances --- cloud/azure/azure.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/azure/azure.py b/cloud/azure/azure.py index 59f425399cc..5469e385da1 100644 --- a/cloud/azure/azure.py +++ b/cloud/azure/azure.py @@ -376,7 +376,7 @@ def create_virtual_machine(module, azure): authorized_keys_path = u'/home/%s/.ssh/authorized_keys' % user 
ssh_config.public_keys.public_keys.append(PublicKey(path=authorized_keys_path, fingerprint=fingerprint)) # Append ssh config to linux machine config - linux_config.ssh = ssh_config + vm_config.ssh = ssh_config # Create network configuration network_config = ConfigurationSetInputEndpoints() @@ -546,7 +546,7 @@ def main(): cloud_service_raw = None if module.params.get('state') == 'absent': (changed, public_dns_name, deployment) = terminate_virtual_machine(module, azure) - + elif module.params.get('state') == 'present': # Changed is always set to true when provisioning new instances if not module.params.get('name'): From cc039d3b30cc67ecdaf8798f9e910fae7a893948 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Thu, 18 Jun 2015 13:12:50 -0500 Subject: [PATCH 263/464] Fix some remaining rax asg idempotency checks --- cloud/rackspace/rax_scaling_group.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/rackspace/rax_scaling_group.py b/cloud/rackspace/rax_scaling_group.py index b949d2e772b..e6c14fdef0f 100644 --- a/cloud/rackspace/rax_scaling_group.py +++ b/cloud/rackspace/rax_scaling_group.py @@ -273,7 +273,7 @@ def rax_asg(module, cooldown=300, disk_config=None, files={}, flavor=None, disk_config = disk_config or 'AUTO' if ((disk_config or lc.get('disk_config')) and - disk_config != lc.get('disk_config')): + disk_config != lc.get('disk_config', 'AUTO')): lc_args['disk_config'] = disk_config if (meta or lc.get('meta')) and meta != lc.get('metadata'): @@ -299,7 +299,7 @@ def rax_asg(module, cooldown=300, disk_config=None, files={}, flavor=None, if key_name != lc.get('key_name'): lc_args['key_name'] = key_name - if config_drive != lc.get('config_drive'): + if config_drive != lc.get('config_drive', False): lc_args['config_drive'] = config_drive if (user_data and From 5b34f3d8bc36bef93036bbcde78891c726fb52dd Mon Sep 17 00:00:00 2001 From: Vyronas Tsingaras Date: Thu, 18 Jun 2015 21:33:16 +0300 Subject: [PATCH 264/464] Remember power state when 
starting vm reconfigure This commit fixes #1391 Signed-off-by: Vyronas Tsingaras --- cloud/vmware/vsphere_guest.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index 3142936062d..33d8fc4abc0 100644 --- a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -571,6 +571,7 @@ def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool, cluster_name changes = {} request = VI.ReconfigVM_TaskRequestMsg() shutdown = False + poweron = vm.is_powered_on() memoryHotAddEnabled = bool(vm.properties.config.memoryHotAddEnabled) cpuHotAddEnabled = bool(vm.properties.config.cpuHotAddEnabled) @@ -661,7 +662,7 @@ def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool, cluster_name module.fail_json( msg="Error reconfiguring vm: %s" % task.get_error_message()) - if vm.is_powered_off(): + if vm.is_powered_off() and poweron: try: vm.power_on(sync_run=True) except Exception, e: From 93c2f9245aa3c75272df7547fac3d68718e80243 Mon Sep 17 00:00:00 2001 From: Jon Hawkesworth Date: Fri, 19 Jun 2015 08:52:43 +0100 Subject: [PATCH 265/464] document file size limit for win_copy module --- windows/win_copy.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/windows/win_copy.py b/windows/win_copy.py index 54f035b1851..efdebc5a4a6 100644 --- a/windows/win_copy.py +++ b/windows/win_copy.py @@ -47,9 +47,15 @@ options: default: null author: "Jon Hawkesworth (@jhawkesworth)" notes: - - The "win_copy" module recursively copy facility does not scale to lots (>hundreds) of files. - Instead, you may find it better to create files locally, perhaps using win_template, and - then use win_get_url to fetch them from your managed hosts into the correct location. + - The "win_copy" module is best used for small files only. 
+ This module should **not** be used for files bigger than 3Mb as + this will result in a 500 response from the winrm host + and it will not be possible to connect via winrm again until the + windows remote management service has been restarted on the + windows host. + Files larger than 1Mb will take minutes to transfer. + The recommended way to transfer large files is using win_get_url + or collecting from a windows file share folder. ''' EXAMPLES = ''' From da62d04eff03e1347ec9c5ba5a3b6f75bb228633 Mon Sep 17 00:00:00 2001 From: Vyronas Tsingaras Date: Fri, 19 Jun 2015 13:04:19 +0300 Subject: [PATCH 266/464] This enable one to find a datastore with no config_target supplied Signed-off-by: Vyronas Tsingaras --- cloud/vmware/vsphere_guest.py | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index 33d8fc4abc0..eba8466dd2b 100644 --- a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -417,13 +417,21 @@ def add_nic(module, s, nfmor, config, devices, nic_type="vmxnet3", network_name= def find_datastore(module, s, datastore, config_target): # Verify the datastore exists and put it in brackets if it does. 
ds = None - for d in config_target.Datastore: - if (d.Datastore.Accessible and - (datastore and d.Datastore.Name == datastore) - or (not datastore)): - ds = d.Datastore.Datastore - datastore = d.Datastore.Name - break + if config_target: + for d in config_target.Datastore: + if (d.Datastore.Accessible and + (datastore and d.Datastore.Name == datastore) + or (not datastore)): + ds = d.Datastore.Datastore + datastore = d.Datastore.Name + break + else: + for ds_mor, ds_name in server.get_datastores().items(): + ds_props = VIProperty(s, ds_mor) + if (ds_props.summary.accessible and (datastore and ds_name == datastore) + or (not datastore)): + ds = ds_mor + datastore = ds_name if not ds: s.disconnect() module.fail_json(msg="Datastore: %s does not appear to exist" % From 7a3383260620078fe4e797c3aad0588de71d3005 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 19 Jun 2015 15:29:43 -0400 Subject: [PATCH 267/464] fixed typo --- packaging/language/gem.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/language/gem.py b/packaging/language/gem.py index 7bfcca83abd..d058193624a 100644 --- a/packaging/language/gem.py +++ b/packaging/language/gem.py @@ -215,7 +215,7 @@ def main(): state = dict(required=False, default='present', choices=['present','absent','latest'], type='str'), user_install = dict(required=False, default=True, type='bool'), pre_release = dict(required=False, default=False, type='bool'), - include_doc = dict(required=False, default=False, type-'bool'), + include_doc = dict(required=False, default=False, type='bool'), version = dict(required=False, type='str'), build_flags = dict(required=False, type='str'), ), From d7f65af6d934759b1c53bbeef010d03d99da241b Mon Sep 17 00:00:00 2001 From: dagnello Date: Fri, 19 Jun 2015 10:45:12 -0700 Subject: [PATCH 268/464] Resolving secgroup.id issue in this module secgroup['id'] was not being returned in all cases where the specified security group exists. 
--- cloud/openstack/os_security_group.py | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/cloud/openstack/os_security_group.py b/cloud/openstack/os_security_group.py index 51e7df772a1..86e6de0b023 100644 --- a/cloud/openstack/os_security_group.py +++ b/cloud/openstack/os_security_group.py @@ -48,6 +48,8 @@ options: - Should the resource be present or absent. choices: [present, absent] default: present + +requirements: ["shade"] ''' EXAMPLES = ''' @@ -114,24 +116,24 @@ def main(): if module.check_mode: module.exit_json(changed=_system_state_change(module, secgroup)) - changed = False if state == 'present': if not secgroup: secgroup = cloud.create_security_group(name, description) - changed = True + module.exit_json(changed=True, id=secgroup['id']) else: if _needs_update(module, secgroup): secgroup = cloud.update_security_group( secgroup['id'], description=description) - changed = True - module.exit_json( - changed=changed, id=secgroup.id, secgroup=secgroup) + module.exit_json(changed=True, id=secgroup['id']) + else: + module.exit_json(changed=False, id=secgroup['id']) if state == 'absent': - if secgroup: + if not secgroup: + module.exit_json(changed=False) + else: cloud.delete_security_group(secgroup['id']) - changed=True - module.exit_json(changed=changed) + module.exit_json(changed=True) except shade.OpenStackCloudException as e: module.fail_json(msg=e.message) From 94a8b6dcccfcceb6cbec876ad957ed2c0a105c19 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Fri, 19 Jun 2015 17:06:12 -0400 Subject: [PATCH 269/464] Make sure we're always returning objects too --- cloud/openstack/os_security_group.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/cloud/openstack/os_security_group.py b/cloud/openstack/os_security_group.py index 86e6de0b023..7fba28c8cb9 100644 --- a/cloud/openstack/os_security_group.py +++ b/cloud/openstack/os_security_group.py @@ -116,24 +116,24 @@ def main(): if module.check_mode: 
module.exit_json(changed=_system_state_change(module, secgroup)) + changed = False if state == 'present': if not secgroup: secgroup = cloud.create_security_group(name, description) - module.exit_json(changed=True, id=secgroup['id']) + changed = True else: if _needs_update(module, secgroup): secgroup = cloud.update_security_group( secgroup['id'], description=description) - module.exit_json(changed=True, id=secgroup['id']) - else: - module.exit_json(changed=False, id=secgroup['id']) + changed = True + module.exit_json( + changed=changed, id=secgroup['id'], secgroup=secgroup) if state == 'absent': - if not secgroup: - module.exit_json(changed=False) - else: + if secgroup: cloud.delete_security_group(secgroup['id']) - module.exit_json(changed=True) + changed = True + module.exit_json(changed=changed) except shade.OpenStackCloudException as e: module.fail_json(msg=e.message) From 850ed126e2500265a4f43c5ee5c8aa00de39796a Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Fri, 19 Jun 2015 17:39:57 -0400 Subject: [PATCH 270/464] Remove duplicate shade requirement --- cloud/openstack/os_security_group.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/cloud/openstack/os_security_group.py b/cloud/openstack/os_security_group.py index 7fba28c8cb9..e42b7f938f5 100644 --- a/cloud/openstack/os_security_group.py +++ b/cloud/openstack/os_security_group.py @@ -48,8 +48,6 @@ options: - Should the resource be present or absent. choices: [present, absent] default: present - -requirements: ["shade"] ''' EXAMPLES = ''' From 9040c2f75cf3b1a36934ad7cf46a66ada211de71 Mon Sep 17 00:00:00 2001 From: murdochr Date: Sat, 20 Jun 2015 21:36:10 +0100 Subject: [PATCH 271/464] Change docs to reflect correct when syntax for matching variable strings as per MD's forum post as this fails with unhelpful error otherwise. 
https://groups.google.com/forum/#!topic/ansible-project/D2hQzZ_jNuM --- network/basics/uri.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/basics/uri.py b/network/basics/uri.py index b7fa8282c83..3de17c12d60 100644 --- a/network/basics/uri.py +++ b/network/basics/uri.py @@ -156,7 +156,7 @@ EXAMPLES = ''' register: webpage - action: fail - when: 'AWESOME' not in "{{ webpage.content }}" + when: "'illustrative' not in webpage.content" # Create a JIRA issue From 9d833d1d4c1c12e846ae70fff50e54bd2d322e36 Mon Sep 17 00:00:00 2001 From: Hagai Kariti Date: Tue, 30 Sep 2014 11:13:54 +0300 Subject: [PATCH 272/464] Hostname module should update ansible_hostname --- system/hostname.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/hostname.py b/system/hostname.py index 882402a5e21..d9193641eb2 100644 --- a/system/hostname.py +++ b/system/hostname.py @@ -509,6 +509,6 @@ def main(): hostname.set_permanent_hostname(name) changed = True - module.exit_json(changed=changed, name=name) + module.exit_json(changed=changed, name=name, ansible_facts=dict(ansible_hostname=name)) main() From 1cfa21829b73b3c3ecbd55e273381842e4a495cd Mon Sep 17 00:00:00 2001 From: Michal Smereczynski Date: Mon, 22 Jun 2015 13:10:09 +0200 Subject: [PATCH 273/464] Added new Premium Storage instance sizes and case-related documentation clarification. --- cloud/azure/azure.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/cloud/azure/azure.py b/cloud/azure/azure.py index 5469e385da1..f1eea46525e 100644 --- a/cloud/azure/azure.py +++ b/cloud/azure/azure.py @@ -53,7 +53,7 @@ options: default: null role_size: description: - - azure role size for the new virtual machine (e.g., Small, ExtraLarge, A6) + - azure role size for the new virtual machine (e.g., Small, ExtraLarge, A6). You have to pay attention to the fact that instances of type G and DS are not available in all regions (locations). 
Make sure if you selected the size and type of instance available in your chosen location. required: false default: Small endpoints: @@ -235,6 +235,14 @@ AZURE_ROLE_SIZES = ['ExtraSmall', 'Standard_D12', 'Standard_D13', 'Standard_D14', + 'Standard_DS1', + 'Standard_DS2', + 'Standard_DS3', + 'Standard_DS4', + 'Standard_DS11', + 'Standard_DS12', + 'Standard_DS13', + 'Standard_DS14', 'Standard_G1', 'Standard_G2', 'Standard_G3', From 74d9f74536e3ee21445a4e9cd4e33ae773590348 Mon Sep 17 00:00:00 2001 From: Tom Paine Date: Mon, 22 Jun 2015 14:52:45 +0100 Subject: [PATCH 274/464] Parse out space characters in route53 value list Fixes: https://github.com/ansible/ansible-modules-core/issues/992 --- cloud/amazon/route53.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/route53.py b/cloud/amazon/route53.py index 67700060d9f..d25be6b99ea 100644 --- a/cloud/amazon/route53.py +++ b/cloud/amazon/route53.py @@ -224,7 +224,7 @@ def main(): if type(value_in) is str: if value_in: - value_list = sorted(value_in.split(',')) + value_list = sorted([s.strip() for s in value_in.split(',')]) elif type(value_in) is list: value_list = sorted(value_in) From 3b4b065315072207537d01770a79584e2a01d0a4 Mon Sep 17 00:00:00 2001 From: Bryan Fleming Date: Wed, 6 May 2015 16:44:40 -0500 Subject: [PATCH 275/464] fixes #1120 - privileges using columns --- database/mysql/mysql_user.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/database/mysql/mysql_user.py b/database/mysql/mysql_user.py index 763e0e7ebd5..0ff290f1185 100644 --- a/database/mysql/mysql_user.py +++ b/database/mysql/mysql_user.py @@ -157,6 +157,7 @@ password=n<_665{vS43y import getpass import tempfile +import re try: import MySQLdb except ImportError: @@ -316,13 +317,19 @@ def privileges_unpack(priv): not specified in the string, as MySQL will always provide this by default. 
""" output = {} + privs = [] for item in priv.strip().split('/'): pieces = item.strip().split(':') dbpriv = pieces[0].rsplit(".", 1) pieces[0] = "`%s`.%s" % (dbpriv[0].strip('`'), dbpriv[1]) - - output[pieces[0]] = [s.strip() for s in pieces[1].upper().split(',')] - new_privs = frozenset(output[pieces[0]]) + if '(' in pieces[1]: + output[pieces[0]] = re.split(r',\s*(?=[^)]*(?:\(|$))', pieces[1].upper()) + for i in output[pieces[0]]: + privs.append(re.sub(r'\(.*\)','',i)) + else: + output[pieces[0]] = pieces[1].upper().split(',') + privs = output[pieces[0]] + new_privs = frozenset(privs) if not new_privs.issubset(VALID_PRIVS): raise InvalidPrivsError('Invalid privileges specified: %s' % new_privs.difference(VALID_PRIVS)) From 2f8300087e54f57cf3482cb75ce7633b805d9fbb Mon Sep 17 00:00:00 2001 From: Andrea Mandolo Date: Tue, 23 Jun 2015 07:14:30 +0200 Subject: [PATCH 276/464] Added "EC2 instance" termination_protection and source_dest_check changeability at run-time --- cloud/amazon/ec2.py | 53 +++++++++++++++++++++++++++++++++------------ 1 file changed, 39 insertions(+), 14 deletions(-) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index 20d49ce5995..dc7d5d38dd3 100644 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -190,6 +190,13 @@ options: required: false default: yes choices: [ "yes", "no" ] + termination_protection: + version_added: "2.0" + description: + - Enable or Disable the Termination Protection + required: false + default: no + choices: [ "yes", "no" ] state: version_added: "1.3" description: @@ -786,6 +793,7 @@ def create_instances(module, ec2, vpc, override_count=None): exact_count = module.params.get('exact_count') count_tag = module.params.get('count_tag') source_dest_check = module.boolean(module.params.get('source_dest_check')) + termination_protection = module.boolean(module.params.get('termination_protection')) # group_id and group_name are exclusive of each other if group_id and group_name: @@ -1014,11 +1022,16 @@ def 
create_instances(module, ec2, vpc, override_count=None): for res in res_list: running_instances.extend(res.instances) - # Enabled by default by Amazon - if not source_dest_check: + # Enabled by default by AWS + if source_dest_check is False: for inst in res.instances: inst.modify_attribute('sourceDestCheck', False) + # Disabled by default by AWS + if termination_protection is True: + for inst in res.instances: + inst.modify_attribute('disableApiTermination', True) + # Leave this as late as possible to try and avoid InvalidInstanceID.NotFound if instance_tags: try: @@ -1135,21 +1148,32 @@ def startstop_instances(module, ec2, instance_ids, state): if not isinstance(instance_ids, list) or len(instance_ids) < 1: module.fail_json(msg='instance_ids should be a list of instances, aborting') - # Check that our instances are not in the state we want to take them to - # and change them to our desired state + # Check (and eventually change) instances attributes and instances state running_instances_array = [] for res in ec2.get_all_instances(instance_ids): for inst in res.instances: - if inst.state != state: - instance_dict_array.append(get_instance_info(inst)) - try: - if state == 'running': - inst.start() - else: - inst.stop() - except EC2ResponseError, e: - module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(inst.id, e)) - changed = True + + # Check "source_dest_check" attribute + if inst.get_attribute('sourceDestCheck')['sourceDestCheck'] != source_dest_check: + inst.modify_attribute('sourceDestCheck', source_dest_check) + changed = True + + # Check "termination_protection" attribute + if inst.get_attribute('disableApiTermination')['disableApiTermination'] != termination_protection: + inst.modify_attribute('disableApiTermination', termination_protection) + changed = True + + # Check instance state + if inst.state != state: + instance_dict_array.append(get_instance_info(inst)) + try: + if state == 'running': + inst.start() + else: + 
inst.stop() + except EC2ResponseError, e: + module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(inst.id, e)) + changed = True ## Wait for all the instances to finish starting or stopping wait_timeout = time.time() + wait_timeout @@ -1200,6 +1224,7 @@ def main(): instance_profile_name = dict(), instance_ids = dict(type='list', aliases=['instance_id']), source_dest_check = dict(type='bool', default=True), + termination_protection = dict(type='bool', default=False), state = dict(default='present', choices=['present', 'absent', 'running', 'stopped']), exact_count = dict(type='int', default=None), count_tag = dict(), From fe4884e8f09b216f298b4fefdc26084a8be8930f Mon Sep 17 00:00:00 2001 From: Andrea Mandolo Date: Mon, 22 Jun 2015 17:13:42 +0200 Subject: [PATCH 277/464] Added some block_device_mapping (disks) informations to EC2 instance module ouput --- cloud/amazon/ec2.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index dc7d5d38dd3..ad2f8f8f71b 100644 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -619,6 +619,19 @@ def get_instance_info(inst): except AttributeError: instance_info['ebs_optimized'] = False + try: + bdm_dict = {} + bdm = getattr(inst, 'block_device_mapping') + for device_name in bdm.keys(): + bdm_dict[device_name] = { + 'status': bdm[device_name].status, + 'volume_id': bdm[device_name].volume_id, + 'delete_on_termination': bdm[device_name].delete_on_termination + } + instance_info['block_device_mapping'] = bdm_dict + except AttributeError: + instance_info['block_device_mapping'] = False + try: instance_info['tenancy'] = getattr(inst, 'placement_tenancy') except AttributeError: From cee4ef0fc3a6e4f21ec9787b2ce002aa4a05bd91 Mon Sep 17 00:00:00 2001 From: dagnello Date: Fri, 19 Jun 2015 10:45:12 -0700 Subject: [PATCH 278/464] Resolving secgroup.id issue in this module secgroup['id'] was not being returned in all cases where the specified security group 
exists. --- cloud/openstack/os_security_group.py | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/cloud/openstack/os_security_group.py b/cloud/openstack/os_security_group.py index 51e7df772a1..86e6de0b023 100644 --- a/cloud/openstack/os_security_group.py +++ b/cloud/openstack/os_security_group.py @@ -48,6 +48,8 @@ options: - Should the resource be present or absent. choices: [present, absent] default: present + +requirements: ["shade"] ''' EXAMPLES = ''' @@ -114,24 +116,24 @@ def main(): if module.check_mode: module.exit_json(changed=_system_state_change(module, secgroup)) - changed = False if state == 'present': if not secgroup: secgroup = cloud.create_security_group(name, description) - changed = True + module.exit_json(changed=True, id=secgroup['id']) else: if _needs_update(module, secgroup): secgroup = cloud.update_security_group( secgroup['id'], description=description) - changed = True - module.exit_json( - changed=changed, id=secgroup.id, secgroup=secgroup) + module.exit_json(changed=True, id=secgroup['id']) + else: + module.exit_json(changed=False, id=secgroup['id']) if state == 'absent': - if secgroup: + if not secgroup: + module.exit_json(changed=False) + else: cloud.delete_security_group(secgroup['id']) - changed=True - module.exit_json(changed=changed) + module.exit_json(changed=True) except shade.OpenStackCloudException as e: module.fail_json(msg=e.message) From 887b88ea73aaf1ed81fc15398b004674b15f3ec3 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Fri, 19 Jun 2015 17:06:12 -0400 Subject: [PATCH 279/464] Make sure we're always returning objects too --- cloud/openstack/os_security_group.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/cloud/openstack/os_security_group.py b/cloud/openstack/os_security_group.py index 86e6de0b023..7fba28c8cb9 100644 --- a/cloud/openstack/os_security_group.py +++ b/cloud/openstack/os_security_group.py @@ -116,24 +116,24 @@ def main(): if 
module.check_mode: module.exit_json(changed=_system_state_change(module, secgroup)) + changed = False if state == 'present': if not secgroup: secgroup = cloud.create_security_group(name, description) - module.exit_json(changed=True, id=secgroup['id']) + changed = True else: if _needs_update(module, secgroup): secgroup = cloud.update_security_group( secgroup['id'], description=description) - module.exit_json(changed=True, id=secgroup['id']) - else: - module.exit_json(changed=False, id=secgroup['id']) + changed = True + module.exit_json( + changed=changed, id=secgroup['id'], secgroup=secgroup) if state == 'absent': - if not secgroup: - module.exit_json(changed=False) - else: + if secgroup: cloud.delete_security_group(secgroup['id']) - module.exit_json(changed=True) + changed = True + module.exit_json(changed=changed) except shade.OpenStackCloudException as e: module.fail_json(msg=e.message) From 1ae299d00ff2fe18abd7b1d01b18f384301afccf Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Fri, 19 Jun 2015 17:39:57 -0400 Subject: [PATCH 280/464] Remove duplicate shade requirement --- cloud/openstack/os_security_group.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/cloud/openstack/os_security_group.py b/cloud/openstack/os_security_group.py index 7fba28c8cb9..e42b7f938f5 100644 --- a/cloud/openstack/os_security_group.py +++ b/cloud/openstack/os_security_group.py @@ -48,8 +48,6 @@ options: - Should the resource be present or absent. choices: [present, absent] default: present - -requirements: ["shade"] ''' EXAMPLES = ''' From 84fd824f75ca30b0854634f6a99e7c0cdb90a029 Mon Sep 17 00:00:00 2001 From: murdochr Date: Sat, 20 Jun 2015 21:36:10 +0100 Subject: [PATCH 281/464] Change docs to reflect correct when syntax for matching variable strings as per MD's forum post as this fails with unhelpful error otherwise. 
https://groups.google.com/forum/#!topic/ansible-project/D2hQzZ_jNuM --- network/basics/uri.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/basics/uri.py b/network/basics/uri.py index b7fa8282c83..3de17c12d60 100644 --- a/network/basics/uri.py +++ b/network/basics/uri.py @@ -156,7 +156,7 @@ EXAMPLES = ''' register: webpage - action: fail - when: 'AWESOME' not in "{{ webpage.content }}" + when: "'illustrative' not in webpage.content" # Create a JIRA issue From 87404fa7987b182c1ccc05197656140174da54e2 Mon Sep 17 00:00:00 2001 From: Hagai Kariti Date: Tue, 30 Sep 2014 11:13:54 +0300 Subject: [PATCH 282/464] Hostname module should update ansible_hostname --- system/hostname.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/hostname.py b/system/hostname.py index 882402a5e21..d9193641eb2 100644 --- a/system/hostname.py +++ b/system/hostname.py @@ -509,6 +509,6 @@ def main(): hostname.set_permanent_hostname(name) changed = True - module.exit_json(changed=changed, name=name) + module.exit_json(changed=changed, name=name, ansible_facts=dict(ansible_hostname=name)) main() From e93b5c672476b34fd81327c3976c92b74d57c0d7 Mon Sep 17 00:00:00 2001 From: Tom Paine Date: Mon, 22 Jun 2015 14:52:45 +0100 Subject: [PATCH 283/464] Parse out space characters in route53 value list Fixes: https://github.com/ansible/ansible-modules-core/issues/992 --- cloud/amazon/route53.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/route53.py b/cloud/amazon/route53.py index 67700060d9f..d25be6b99ea 100644 --- a/cloud/amazon/route53.py +++ b/cloud/amazon/route53.py @@ -224,7 +224,7 @@ def main(): if type(value_in) is str: if value_in: - value_list = sorted(value_in.split(',')) + value_list = sorted([s.strip() for s in value_in.split(',')]) elif type(value_in) is list: value_list = sorted(value_in) From 617d5750a6be96d68e5412832a79c0fa8229ffbc Mon Sep 17 00:00:00 2001 From: Andrea Mandolo Date: Tue, 23 Jun 2015 
07:14:30 +0200 Subject: [PATCH 284/464] Added "EC2 instance" termination_protection and source_dest_check changeability at run-time --- cloud/amazon/ec2.py | 53 +++++++++++++++++++++++++++++++++------------ 1 file changed, 39 insertions(+), 14 deletions(-) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index 20d49ce5995..dc7d5d38dd3 100644 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -190,6 +190,13 @@ options: required: false default: yes choices: [ "yes", "no" ] + termination_protection: + version_added: "2.0" + description: + - Enable or Disable the Termination Protection + required: false + default: no + choices: [ "yes", "no" ] state: version_added: "1.3" description: @@ -786,6 +793,7 @@ def create_instances(module, ec2, vpc, override_count=None): exact_count = module.params.get('exact_count') count_tag = module.params.get('count_tag') source_dest_check = module.boolean(module.params.get('source_dest_check')) + termination_protection = module.boolean(module.params.get('termination_protection')) # group_id and group_name are exclusive of each other if group_id and group_name: @@ -1014,11 +1022,16 @@ def create_instances(module, ec2, vpc, override_count=None): for res in res_list: running_instances.extend(res.instances) - # Enabled by default by Amazon - if not source_dest_check: + # Enabled by default by AWS + if source_dest_check is False: for inst in res.instances: inst.modify_attribute('sourceDestCheck', False) + # Disabled by default by AWS + if termination_protection is True: + for inst in res.instances: + inst.modify_attribute('disableApiTermination', True) + # Leave this as late as possible to try and avoid InvalidInstanceID.NotFound if instance_tags: try: @@ -1135,21 +1148,32 @@ def startstop_instances(module, ec2, instance_ids, state): if not isinstance(instance_ids, list) or len(instance_ids) < 1: module.fail_json(msg='instance_ids should be a list of instances, aborting') - # Check that our instances are not in the state we want to 
take them to - # and change them to our desired state + # Check (and eventually change) instances attributes and instances state running_instances_array = [] for res in ec2.get_all_instances(instance_ids): for inst in res.instances: - if inst.state != state: - instance_dict_array.append(get_instance_info(inst)) - try: - if state == 'running': - inst.start() - else: - inst.stop() - except EC2ResponseError, e: - module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(inst.id, e)) - changed = True + + # Check "source_dest_check" attribute + if inst.get_attribute('sourceDestCheck')['sourceDestCheck'] != source_dest_check: + inst.modify_attribute('sourceDestCheck', source_dest_check) + changed = True + + # Check "termination_protection" attribute + if inst.get_attribute('disableApiTermination')['disableApiTermination'] != termination_protection: + inst.modify_attribute('disableApiTermination', termination_protection) + changed = True + + # Check instance state + if inst.state != state: + instance_dict_array.append(get_instance_info(inst)) + try: + if state == 'running': + inst.start() + else: + inst.stop() + except EC2ResponseError, e: + module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(inst.id, e)) + changed = True ## Wait for all the instances to finish starting or stopping wait_timeout = time.time() + wait_timeout @@ -1200,6 +1224,7 @@ def main(): instance_profile_name = dict(), instance_ids = dict(type='list', aliases=['instance_id']), source_dest_check = dict(type='bool', default=True), + termination_protection = dict(type='bool', default=False), state = dict(default='present', choices=['present', 'absent', 'running', 'stopped']), exact_count = dict(type='int', default=None), count_tag = dict(), From 6cdfbb72f01468192965e45ce6c019b2ea44ea65 Mon Sep 17 00:00:00 2001 From: Andrea Mandolo Date: Mon, 22 Jun 2015 17:13:42 +0200 Subject: [PATCH 285/464] Added some block_device_mapping (disks) informations to EC2 
instance module ouput --- cloud/amazon/ec2.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index dc7d5d38dd3..ad2f8f8f71b 100644 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -619,6 +619,19 @@ def get_instance_info(inst): except AttributeError: instance_info['ebs_optimized'] = False + try: + bdm_dict = {} + bdm = getattr(inst, 'block_device_mapping') + for device_name in bdm.keys(): + bdm_dict[device_name] = { + 'status': bdm[device_name].status, + 'volume_id': bdm[device_name].volume_id, + 'delete_on_termination': bdm[device_name].delete_on_termination + } + instance_info['block_device_mapping'] = bdm_dict + except AttributeError: + instance_info['block_device_mapping'] = False + try: instance_info['tenancy'] = getattr(inst, 'placement_tenancy') except AttributeError: From a1538b490ed71fc291035daa4aaf184369e3fa86 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 23 Jun 2015 08:57:06 -0700 Subject: [PATCH 286/464] Fix documentation --- cloud/amazon/ec2.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index ad2f8f8f71b..6d47fa6ac32 100644 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -208,7 +208,7 @@ options: volumes: version_added: "1.5" description: - - a list of volume dicts, each containing device name and optionally ephemeral id or snapshot id. Size and type (and number of iops for io device type) must be specified for a new volume or a root volume, and may be passed for a snapshot volume. For any volume, a volume size less than 1 will be interpreted as a request not to create the volume. Encrypt the volume by passing 'encrypted: true' in the volume dict. + - "a list of volume dicts, each containing device name and optionally ephemeral id or snapshot id. Size and type (and number of iops for io device type) must be specified for a new volume or a root volume, and may be passed for a snapshot volume. 
For any volume, a volume size less than 1 will be interpreted as a request not to create the volume. Encrypt the volume by passing 'encrypted: true' in the volume dict." required: false default: null aliases: [] From baff1bf7f0b49e2b4bc9f2c0582a1d356df160d9 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Tue, 23 Jun 2015 13:16:28 -0400 Subject: [PATCH 287/464] Update choices and version_added for new gce.py param service_account_permissions --- cloud/google/gce.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cloud/google/gce.py b/cloud/google/gce.py index 48536057637..251a3ee9e93 100644 --- a/cloud/google/gce.py +++ b/cloud/google/gce.py @@ -59,12 +59,13 @@ options: default: null aliases: [] service_account_permissions: - version_added: 1.5.1 + version_added: 2.0 description: - service account permissions (see U(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create), --scopes section for detailed information) required: false default: null aliases: [] + choices: ["bigquery", "cloud-platform", "compute-ro", "compute-rw", "computeaccounts-ro", "computeaccounts-rw", "datastore", "logging-write", "monitoring", "sql", "sql-admin", "storage-full", "storage-ro", "storage-rw", "taskqueue", "userinfo-email"] pem_file: version_added: 1.5.1 description: From 207abb6f5c7e9d1d50dc52e0ee4cc04d192912fa Mon Sep 17 00:00:00 2001 From: Evan Carter Date: Tue, 23 Jun 2015 14:08:43 -0400 Subject: [PATCH 288/464] Add ClassicLink settings to EC2_launchconfig --- cloud/amazon/ec2_lc.py | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/cloud/amazon/ec2_lc.py b/cloud/amazon/ec2_lc.py index 3c292377a58..6c5e2c1dd4c 100644 --- a/cloud/amazon/ec2_lc.py +++ b/cloud/amazon/ec2_lc.py @@ -116,6 +116,18 @@ options: default: false aliases: [] version_added: "1.8" + classic_link_vpc_id: + description: + - Id of ClassicLink enabled VPC + required: false + default: null + version_added: "2.0" + 
classic_link_vpc_security_groups" + description: + - A list of security group id’s with which to associate the ClassicLink VPC instances. + required: false + default: null + version_added: "2.0" extends_documentation_fragment: aws """ @@ -184,6 +196,8 @@ def create_launch_config(connection, module): ramdisk_id = module.params.get('ramdisk_id') instance_profile_name = module.params.get('instance_profile_name') ebs_optimized = module.params.get('ebs_optimized') + classic_link_vpc_id = module.params.get('classic_link_vpc_id') + classic_link_vpc_security_groups = module.params.get('classic_link_vpc_security_groups') bdm = BlockDeviceMapping() if volumes: @@ -206,10 +220,12 @@ def create_launch_config(connection, module): kernel_id=kernel_id, spot_price=spot_price, instance_monitoring=instance_monitoring, - associate_public_ip_address = assign_public_ip, + associate_public_ip_address=assign_public_ip, ramdisk_id=ramdisk_id, instance_profile_name=instance_profile_name, ebs_optimized=ebs_optimized, + classic_link_vpc_security_groups=classic_link_vpc_security_groups, + classic_link_vpc_id=classic_link_vpc_id, ) launch_configs = connection.get_all_launch_configurations(names=[name]) @@ -257,7 +273,9 @@ def main(): ebs_optimized=dict(default=False, type='bool'), associate_public_ip_address=dict(type='bool'), instance_monitoring=dict(default=False, type='bool'), - assign_public_ip=dict(type='bool') + assign_public_ip=dict(type='bool'), + classic_link_vpc_security_groups=dict(type='list'), + classic_link_vpc_id=dict(type='str') ) ) From c6f9e08282b7eefc2f7f2825df369d0099c2c3b2 Mon Sep 17 00:00:00 2001 From: Jonathan Davila Date: Mon, 13 Apr 2015 21:22:11 -0400 Subject: [PATCH 289/464] new vpc module. does not contain subnet or route table functionality. 
changed name to ec2_vpc_net refactored out IGW functionality --- cloud/amazon/ec2_vpc_net.py | 344 ++++++++++++++++++++++++++++++++++++ 1 file changed, 344 insertions(+) create mode 100644 cloud/amazon/ec2_vpc_net.py diff --git a/cloud/amazon/ec2_vpc_net.py b/cloud/amazon/ec2_vpc_net.py new file mode 100644 index 00000000000..33c711e7683 --- /dev/null +++ b/cloud/amazon/ec2_vpc_net.py @@ -0,0 +1,344 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: ec2_vpc_net +short_description: configure AWS virtual private clouds +description: + - Create or terminates AWS virtual private clouds. This module has a dependency on python-boto. +version_added: "2.0" +options: + name: + description: + - The name to give your VPC. This is used in combination with the cidr_block paramater to determine if a VPC already exists. + required: yes + cidr_block: + description: + - The CIDR of the VPC + required: yes + aliases: [] + tenancy: + description: + - Whether to be default or dedicated tenancy. This cannot be changed after the VPC has been created. + required: false + default: default + dns_support: + description: + - Whether to enable AWS DNS support. + required: false + default: true + dns_hostnames: + description: + - Whether to enable AWS hostname support. 
+ required: false + default: true + dhcp_id: + description: + - the id of the DHCP options to use for this vpc + default: null + required: false + tags: + description: + - The tags you want attached to the VPC. This is independent of the name value, note if you pass a 'Name' key it would override the Name of the VPC if it's different. + default: None + required: false + state: + description: + - The state of the VPC. Either absent or present. + default: present + required: false + multi_ok: + description: + - By default the module will not create another VPC if there is another VPC with the same name and CIDR block. Specify this as true if you want duplicate VPCs created. + default: false + required: false +author: Jonathan Davila +extends_documentation_fragment: aws +''' + +EXAMPLES = ''' +# Create a VPC with dedicate tenancy and a couple of tags + +- ec2_vpc: + name: Module_dev2 + cidr_block: 170.10.0.0/16 + region: us-east-1 + tags: + new_vpc: ec2_vpc_module + this: works22 + tenancy: dedicated + +''' + + +import time +import sys + +try: + import boto + import boto.ec2 + import boto.vpc + from boto.exception import EC2ResponseError + + HAS_BOTO=True +except ImportError: + HAS_BOTO=False + +def boto_exception(err): + '''generic error message handler''' + if hasattr(err, 'error_message'): + error = err.error_message + elif hasattr(err, 'message'): + error = err.message + else: + error = '%s: %s' % (Exception, err) + + return error + +def vpc_exists(module, vpc, name, cidr_block, multi): + """Returns True or False in regards to the existance of a VPC. When supplied + with a CIDR, it will check for matching tags to determine if it is a match + otherwise it will assume the VPC does not exist and thus return false. 
+ """ + exists=False + matched_vpc=None + + try: + matching_vpcs=vpc.get_all_vpcs(filters={'tag:Name' : name, 'cidr-block' : cidr_block}) + except Exception, e: + e_msg=boto_exception(e) + module.fail_json(msg=e_msg) + + if len(matching_vpcs) == 1 and not multi: + exists=True + matched_vpc=str(matching_vpcs).split(':')[1].split(']')[0] + elif len(matching_vpcs) > 1 and not multi: + module.fail_json(msg='Currently there are %d VPCs that have the same name and ' + 'CIDR block you specified. If you would like to create ' + 'the VPC anyways please pass True to the multi_ok param.' % len(matching_vpcs)) + + return exists, matched_vpc + +def vpc_needs_update(module, vpc, vpc_id, dns_support, dns_hostnames, dhcp_id, tags): + """This returns True or False. Intended to run after vpc_exists. + It will check all the characteristics of the parameters passed and compare them + to the active VPC. If any discrepancy is found, it will report true, meaning that + the VPC needs to be update in order to match the specified state in the params. + """ + + update_dhcp=False + update_tags=False + dhcp_match=False + + try: + dhcp_list=vpc.get_all_dhcp_options() + + if dhcp_id is not None: + has_default=vpc.get_all_vpcs(filters={'dhcp-options-id' : 'default', 'vpc-id' : vpc_id}) + for opts in dhcp_list: + if (str(opts).split(':')[1] == dhcp_id) or has_default: + dhcp_match=True + break + else: + pass + except Exception, e: + e_msg=boto_exception(e) + module.fail_json(msg=e_msg) + + if not dhcp_match or (has_default and dhcp_id != 'default'): + update_dhcp=True + + if dns_hostnames and dns_support == False: + module.fail_json('In order to enable DNS Hostnames you must have DNS support enabled') + else: + + # Note: Boto currently doesn't currently provide an interface to ec2-describe-vpc-attribute + # which is needed in order to detect the current status of DNS options. For now we just update + # the attribute each time and is not used as a changed-factor. 
+ try: + vpc.modify_vpc_attribute(vpc_id, enable_dns_support=dns_support) + vpc.modify_vpc_attribute(vpc_id, enable_dns_hostnames=dns_hostnames) + except Exception, e: + e_msg=boto_exception(e) + module.fail_json(msg=e_msg) + + if tags: + try: + current_tags = dict((t.name, t.value) for t in vpc.get_all_tags(filters={'resource-id': vpc_id})) + if not set(tags.items()).issubset(set(current_tags.items())): + update_tags=True + except Exception, e: + e_msg=boto_exception(e) + module.fail_json(msg=e_msg) + + return update_dhcp, update_tags + + +def update_vpc_tags(module, vpc, vpc_id, tags, name): + tags.update({'Name': name}) + try: + vpc.create_tags(vpc_id, tags) + updated_tags=dict((t.name, t.value) for t in vpc.get_all_tags(filters={'resource-id': vpc_id})) + except Exception, e: + e_msg=boto_exception(e) + module.fail_json(msg=e_msg) + + return updated_tags + + +def update_dhcp_opts(module, vpc, vpc_id, dhcp_id): + try: + vpc.associate_dhcp_options(dhcp_id, vpc_id) + dhcp_list=vpc.get_all_dhcp_options() + except Exception, e: + e_msg=boto_exception(e) + module.fail_json(msg=e_msg) + + for opts in dhcp_list: + vpc_dhcp=vpc.get_all_vpcs(filters={'dhcp-options-id' : opts, 'vpc-id' : vpc_id}) + matched=False + if opts == dhcp_id: + matched=True + return opts + + if matched == False: + return dhcp_id + +def main(): + argument_spec=ec2_argument_spec() + argument_spec.update(dict( + name=dict(type='str', default=None, required=True), + cidr_block=dict(type='str', default=None, required=True), + tenancy=dict(choices=['default', 'dedicated'], default='default'), + dns_support=dict(type='bool', default=True), + dns_hostnames=dict(type='bool', default=True), + dhcp_opts_id=dict(type='str', default=None, required=False), + tags=dict(type='dict', required=False, default=None), + state=dict(choices=['present', 'absent'], default='present'), + region=dict(type='str', required=True), + multi_ok=dict(type='bool', default=False) + ) + ) + + module = AnsibleModule( + 
argument_spec=argument_spec, + ) + + if not HAS_BOTO: + module.fail_json(msg='Boto is required for this module') + + name=module.params.get('name') + cidr_block=module.params.get('cidr_block') + tenancy=module.params.get('tenancy') + dns_support=module.params.get('dns_support') + dns_hostnames=module.params.get('dns_hostnames') + dhcp_id=module.params.get('dhcp_opts_id') + tags=module.params.get('tags') + state=module.params.get('state') + multi=module.params.get('multi_ok') + + changed=False + new_dhcp_opts=None + new_tags=None + update_dhcp=False + update_tags=False + + region, ec2_url, aws_connect_kwargs=get_aws_connection_info(module) + + try: + vpc=boto.vpc.connect_to_region( + region, + **aws_connect_kwargs + ) + except boto.exception.NoAuthHandlerFound, e: + module.fail_json(msg=str(e)) + + already_exists, vpc_id=vpc_exists(module, vpc, name, cidr_block, multi) + + if already_exists: + update_dhcp, update_tags=vpc_needs_update(module, vpc, vpc_id, dns_support, dns_hostnames, dhcp_id, tags) + if update_dhcp or update_tags: + changed=True + + try: + e_tags=dict((t.name, t.value) for t in vpc.get_all_tags(filters={'resource-id': vpc_id})) + dhcp_list=vpc.get_all_dhcp_options() + has_default=vpc.get_all_vpcs(filters={'dhcp-options-id' : 'default', 'vpc-id' : vpc_id}) + except Exception, e: + e_msg=boto_exception(e) + module.fail_json(msg=e_msg) + + dhcp_opts=None + + try: + for opts in dhcp_list: + if vpc.get_all_vpcs(filters={'dhcp-options-id' : opts, 'vpc-id' : vpc_id}): + dhcp_opts=opts + break + else: + pass + except Exception, e: + e_msg=boto_exception(e) + module.fail_json(msg=e_msg) + + if not dhcp_opts and has_default: + dhcp_opts='default' + + if state == 'present': + + if not changed and already_exists: + module.exit_json(changed=changed, vpc_id=vpc_id) + elif changed: + if update_dhcp: + dhcp_opts=update_dhcp_opts(module, vpc, vpc_id, dhcp_id) + if update_tags: + e_tags=update_vpc_tags(module, vpc, vpc_id, tags, name) + + 
module.exit_json(changed=changed, name=name, dhcp_options_id=dhcp_opts, tags=e_tags) + + if not already_exists: + try: + vpc_id=str(vpc.create_vpc(cidr_block, instance_tenancy=tenancy)).split(':')[1] + vpc.create_tags(vpc_id, dict(Name=name)) + except Exception, e: + e_msg=boto_exception(e) + module.fail_json(msg=e_msg) + + update_dhcp, update_tags=vpc_needs_update(module, vpc, vpc_id, dns_support, dns_hostnames, dhcp_id, tags) + + if update_dhcp: + new_dhcp_opts=update_dhcp_opts(module, vpc, vpc_id, dhcp_id) + if update_tags: + new_tags=update_vpc_tags(module, vpc, vpc_id, tags, name) + module.exit_json(changed=True, name=name, vpc_id=vpc_id, dhcp_options=new_dhcp_opts, tags=new_tags) + elif state == 'absent': + if already_exists: + changed=True + try: + vpc.delete_vpc(vpc_id) + module.exit_json(changed=changed, vpc_id=vpc_id) + except Exception, e: + e_msg=boto_exception(e) + module.fail_json(msg="%s. You may want to use the ec2_vpc_subnet, ec2_vpc_igw, " + "and/or ec2_vpc_rt modules to ensure the other components are absent." 
% e_msg) + else: + module.exit_json(msg="VPC is absent") +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import * + +main() From 7dac96e41b8892cddb2682e11131251dacd40dd3 Mon Sep 17 00:00:00 2001 From: Jonathan Davila Date: Mon, 9 Mar 2015 18:52:24 -0400 Subject: [PATCH 290/464] iam certificate module boto import tweak style patch --- cloud/amazon/iam_cert.py | 294 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 294 insertions(+) create mode 100644 cloud/amazon/iam_cert.py diff --git a/cloud/amazon/iam_cert.py b/cloud/amazon/iam_cert.py new file mode 100644 index 00000000000..1f58be753c8 --- /dev/null +++ b/cloud/amazon/iam_cert.py @@ -0,0 +1,294 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +DOCUMENTATION = ''' +--- +module: iam_cert +short_description: Manage server certificates for use on ELBs and CloudFront +description: + - Allows for the management of server certificates +version_added: "2.0" +options: + name: + description: + - Name of certificate to add, update or remove. + required: true + aliases: [] + new_name: + description: + - When present, this will update the name of the cert with the value passed here. + required: false + aliases: [] + new_path: + description: + - When present, this will update the path of the cert with the value passed here. 
+ required: false + aliases: [] + state: + description: + - Whether to create, delete certificate. When present is specified it will attempt to make an update if new_path or new_name is specified. + required: true + default: null + choices: [ "present", "absent" ] + aliases: [] + path: + description: + - When creating or updating, specify the desired path of the certificate + required: false + default: "/" + aliases: [] + cert_chain: + description: + - The path to the CA certificate chain in PEM encoded format. + required: false + default: null + aliases: [] + cert: + description: + - The path to the certificate body in PEM encoded format. + required: false + aliases: [] + key: + description: + - The path to the private key of the certificate in PEM encoded format. + dup_ok: + description: + - By default the module will not upload a certifcate that is already uploaded into AWS. If set to True, it will upload the certifcate as long as the name is unique. + required: false + default: False + aliases: [] + aws_secret_key: + description: + - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used. + required: false + default: null + aliases: [ 'ec2_secret_key', 'secret_key' ] + aws_access_key: + description: + - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used. + required: false + default: null + aliases: [ 'ec2_access_key', 'access_key' ] + + +requirements: [ "boto" ] +author: Jonathan I. 
Davila +extends_documentation_fragment: aws +''' + +EXAMPLES = ''' +# Basic server certificate upload +tasks: +- name: Upload Certifcate + iam_cert: + name: very_ssl + state: present + cert: somecert.pem + key: privcertkey + cert_chain: myverytrustedchain + +''' +import json +import sys +try: + import boto + import boto.iam + HAS_BOTO = True +except ImportError: + HAS_BOTO = False + +def boto_exception(err): + '''generic error message handler''' + if hasattr(err, 'error_message'): + error = err.error_message + elif hasattr(err, 'message'): + error = err.message + else: + error = '%s: %s' % (Exception, err) + + return error + +def cert_meta(iam, name): + opath = iam.get_server_certificate(name).get_server_certificate_result.\ + server_certificate.\ + server_certificate_metadata.\ + path + ocert = iam.get_server_certificate(name).get_server_certificate_result.\ + server_certificate.\ + certificate_body + ocert_id = iam.get_server_certificate(name).get_server_certificate_result.\ + server_certificate.\ + server_certificate_metadata.\ + server_certificate_id + upload_date = iam.get_server_certificate(name).get_server_certificate_result.\ + server_certificate.\ + server_certificate_metadata.\ + upload_date + exp = iam.get_server_certificate(name).get_server_certificate_result.\ + server_certificate.\ + server_certificate_metadata.\ + expiration + return opath, ocert, ocert_id, upload_date, exp + +def dup_check(module, iam, name, new_name, cert, orig_cert_names, orig_cert_bodies, dup_ok): + update=False + if any(ct in orig_cert_names for ct in [name, new_name]): + for i_name in [name, new_name]: + if i_name is None: + continue + + if cert is not None: + try: + c_index=orig_cert_names.index(i_name) + except NameError: + continue + else: + if orig_cert_bodies[c_index] == cert: + update=True + break + elif orig_cert_bodies[c_index] != cert: + module.fail_json(changed=False, msg='A cert with the name %s already exists and' + ' has a different certificate body associated' + ' 
with it. Certifcates cannot have the same name') + else: + update=True + break + elif cert in orig_cert_bodies and not dup_ok: + for crt_name, crt_body in zip(orig_cert_names, orig_cert_bodies): + if crt_body == cert: + module.fail_json(changed=False, msg='This certificate already' + ' exists under the name %s' % crt_name) + + return update + + +def cert_action(module, iam, name, cpath, new_name, new_path, state, + cert, key, chain, orig_cert_names, orig_cert_bodies, dup_ok): + if state == 'present': + update = dup_check(module, iam, name, new_name, cert, orig_cert_names, + orig_cert_bodies, dup_ok) + if update: + opath, ocert, ocert_id, upload_date, exp = cert_meta(iam, name) + changed=True + if new_name and new_path: + iam.update_server_cert(name, new_cert_name=new_name, new_path=new_path) + module.exit_json(changed=changed, original_name=name, new_name=new_name, + original_path=opath, new_path=new_path, cert_body=ocert, + upload_date=upload_date, expiration_date=exp) + elif new_name and not new_path: + iam.update_server_cert(name, new_cert_name=new_name) + module.exit_json(changed=changed, original_name=name, new_name=new_name, + cert_path=opath, cert_body=ocert, + upload_date=upload_date, expiration_date=exp) + elif not new_name and new_path: + iam.update_server_cert(name, new_path=new_path) + module.exit_json(changed=changed, name=new_name, + original_path=opath, new_path=new_path, cert_body=ocert, + upload_date=upload_date, expiration_date=exp) + else: + changed=False + module.exit_json(changed=changed, name=name, cert_path=opath, cert_body=ocert, + upload_date=upload_date, expiration_date=exp, + msg='No new path or name specified. 
No changes made') + else: + changed=True + iam.upload_server_cert(name, cert, key, cert_chain=chain, path=cpath) + opath, ocert, ocert_id, upload_date, exp = cert_meta(iam, name) + module.exit_json(changed=changed, name=name, cert_path=opath, cert_body=ocert, + upload_date=upload_date, expiration_date=exp) + elif state == 'absent': + if name in orig_cert_names: + changed=True + iam.delete_server_cert(name) + module.exit_json(changed=changed, deleted_cert=name) + else: + changed=False + module.exit_json(changed=changed, msg='Certifcate with the name %s already absent' % name) + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + state=dict( + default=None, required=True, choices=['present', 'absent']), + name=dict(default=None, required=False), + cert=dict(default=None, required=False), + key=dict(default=None, required=False), + cert_chain=dict(default=None, required=False), + new_name=dict(default=None, required=False), + path=dict(default='/', required=False), + new_path=dict(default=None, required=False), + dup_ok=dict(default=False, required=False, choices=[False, True]) + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[], + ) + + if not HAS_BOTO: + module.fail_json(msg="Boto is required for this module") + + ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module) + + try: + iam = boto.iam.connection.IAMConnection( + aws_access_key_id=aws_access_key, + aws_secret_access_key=aws_secret_key, + ) + except boto.exception.NoAuthHandlerFound, e: + module.fail_json(msg=str(e)) + + state = module.params.get('state') + name = module.params.get('name') + path = module.params.get('path') + new_name = module.params.get('new_name') + new_path = module.params.get('new_path') + cert_chain = module.params.get('cert_chain') + dup_ok = module.params.get('dup_ok') + if state == 'present': + cert = open(module.params.get('cert'), 'r').read().rstrip() + key = open(module.params.get('key'), 
'r').read().rstrip() + if cert_chain is not None: + cert_chain = open(module.params.get('cert_chain'), 'r').read() + else: + key=cert=chain=None + + orig_certs = [ctb['server_certificate_name'] for ctb in \ + iam.get_all_server_certs().\ + list_server_certificates_result.\ + server_certificate_metadata_list] + orig_bodies = [iam.get_server_certificate(thing).\ + get_server_certificate_result.\ + certificate_body \ + for thing in orig_certs] + if new_name == name: + new_name = None + if new_path == path: + new_path = None + + changed = False + try: + cert_action(module, iam, name, path, new_name, new_path, state, + cert, key, cert_chain, orig_certs, orig_bodies, dup_ok) + except boto.exception.BotoServerError, err: + module.fail_json(changed=changed, msg=str(err), debug=[cert,key]) + + +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import * + +if __name__ == '__main__': + main() From 33533eb1560ded3de4f2402c0d2c076c09bad088 Mon Sep 17 00:00:00 2001 From: billwanjohi Date: Tue, 23 Jun 2015 18:31:48 +0000 Subject: [PATCH 291/464] iam: use modern helper to allow sts previous implementation ignored the session token when present --- cloud/amazon/iam.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/cloud/amazon/iam.py b/cloud/amazon/iam.py index a7d0fbeee5b..d2807a23b44 100644 --- a/cloud/amazon/iam.py +++ b/cloud/amazon/iam.py @@ -565,13 +565,10 @@ def main(): module.fail_json(changed=False, msg="iam_type: role, cannot currently be updated, " "please specificy present or absent") - ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module) + region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module) try: - iam = boto.iam.connection.IAMConnection( - aws_access_key_id=aws_access_key, - aws_secret_access_key=aws_secret_key, - ) + iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs) except boto.exception.NoAuthHandlerFound, e: module.fail_json(msg=str(e)) From 
58ef71fc8467fb0f6786b200732bbc0eeb54a1ed Mon Sep 17 00:00:00 2001 From: Jonathan Mainguy Date: Sun, 31 May 2015 19:03:35 -0400 Subject: [PATCH 292/464] add download ability to unarchive module --- files/unarchive.py | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/files/unarchive.py b/files/unarchive.py index 625989ffdfb..2efd48294a1 100644 --- a/files/unarchive.py +++ b/files/unarchive.py @@ -32,6 +32,7 @@ options: src: description: - If copy=yes (default), local path to archive file to copy to the target server; can be absolute or relative. If copy=no, path on the target server to existing archive file to unpack. + - If copy=no and src contains ://, the remote machine will download the file from the url first. (version_added 2.0) required: true default: null dest: @@ -81,6 +82,9 @@ EXAMPLES = ''' # Unarchive a file that is already on the remote machine - unarchive: src=/tmp/foo.zip dest=/usr/local/bin copy=no + +# Unarchive a file that needs to be downloaded +- unarchive: src=https://example.com/example.zip dest=/usr/local/bin copy=no ''' import re @@ -269,6 +273,25 @@ def main(): if not os.path.exists(src): if copy: module.fail_json(msg="Source '%s' failed to transfer" % src) + # If copy=false, and src= contains ://, try and download the file to a temp directory. 
+ elif '://' in src: + tempdir = os.path.dirname(__file__) + package = os.path.join(tempdir, str(src.rsplit('/', 1)[1])) + try: + rsp, info = fetch_url(module, src) + f = open(package, 'w') + # Read 1kb at a time to save on ram + while True: + data = rsp.read(1024) + + if data == "": + break # End of file, break while loop + + f.write(data) + f.close() + src = package + except Exception, e: + module.fail_json(msg="Failure downloading %s, %s" % (src, e)) else: module.fail_json(msg="Source '%s' does not exist" % src) if not os.access(src, os.R_OK): @@ -315,5 +338,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * +from ansible.module_utils.urls import * if __name__ == '__main__': main() From 56d4f21c5f7086a3788844a391043d6748e6ce93 Mon Sep 17 00:00:00 2001 From: Michael Weinrich Date: Fri, 19 Jun 2015 14:43:40 +0200 Subject: [PATCH 293/464] Use aws connect calls that allow boto profile use --- cloud/amazon/iam.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cloud/amazon/iam.py b/cloud/amazon/iam.py index c1d5ef70901..bda953faab4 100644 --- a/cloud/amazon/iam.py +++ b/cloud/amazon/iam.py @@ -146,6 +146,7 @@ import sys try: import boto import boto.iam + import boto.ec2 HAS_BOTO = True except ImportError: HAS_BOTO = False From 29b00ba526d18edd5a0bf2e94d8f6a55ef7ec85b Mon Sep 17 00:00:00 2001 From: zimbatm Date: Tue, 5 May 2015 16:07:18 +0100 Subject: [PATCH 294/464] route53: add support for routing policies It is now possible to pass various routing policies if an identity is provided. 
This commit also introduces multiple optimisations: * Only fetch records for the given domain * Use UPSERT instead of DELETE+CREATE to update existing records --- cloud/amazon/route53.py | 133 ++++++++++++++++++++++++++++++---------- 1 file changed, 101 insertions(+), 32 deletions(-) diff --git a/cloud/amazon/route53.py b/cloud/amazon/route53.py index d25be6b99ea..c2ad603a1f4 100644 --- a/cloud/amazon/route53.py +++ b/cloud/amazon/route53.py @@ -93,6 +93,45 @@ options: required: false default: false version_added: "1.9" + identifier: + description: + - Weighted and latency-based resource record sets only. An identifier + that differentiates among multiple resource record sets that have the + same combination of DNS name and type. + required: false + default: null + version_added: "2.0" + weight: + description: + - Weighted resource record sets only. Among resource record sets that + have the same combination of DNS name and type, a value that + determines what portion of traffic for the current resource record set + is routed to the associated location. + required: false + default: null + version_added: "2.0" + region: + description: + - Latency-based resource record sets only Among resource record sets + that have the same combination of DNS name and type, a value that + determines which region this should be associated with for the + latency-based routing + required: false + default: null + version_added: "2.0" + health_check: + description: + - Health check to associate with this record + required: false + default: null + version_added: "2.0" + failover: + description: + - Failover resource record sets only. Whether this is the primary or + secondary resource record set. 
+ required: false + default: null + version_added: "2.0" author: "Bruce Pennypacker (@bpennypacker)" extends_documentation_fragment: aws ''' @@ -156,6 +195,18 @@ EXAMPLES = ''' alias=True alias_hosted_zone_id="{{ elb_zone_id }}" +# Use a routing policy to distribute traffic: +- route53: + command: "create" + zone: "foo.com" + record: "www.foo.com" + type: "CNAME" + value: "host1.foo.com" + ttl: 30 + # Routing policy + identifier: "host1@www" + weight: 100 + health_check: "d994b780-3150-49fd-9205-356abdd42e75" ''' @@ -166,11 +217,21 @@ try: import boto.ec2 from boto import route53 from boto.route53 import Route53Connection - from boto.route53.record import ResourceRecordSets + from boto.route53.record import Record, ResourceRecordSets HAS_BOTO = True except ImportError: HAS_BOTO = False +def get_zone_by_name(conn, module, zone_name, want_private): + """Finds a zone by name""" + for zone in conn.get_zones(): + # only save this zone id if the private status of the zone matches + # the private_zone_in boolean specified in the params + private_zone = module.boolean(zone.config.get('PrivateZone', False)) + if private_zone == want_private and zone.name == zone_name: + return zone + return None + def commit(changes, retry_interval): """Commit changes, but retry PriorRequestNotComplete errors.""" @@ -200,6 +261,11 @@ def main(): overwrite = dict(required=False, type='bool'), retry_interval = dict(required=False, default=500), private_zone = dict(required=False, type='bool', default=False), + identifier = dict(required=False), + weight = dict(required=False, type='int'), + region = dict(required=False), + health_check = dict(required=False), + failover = dict(required=False), ) ) module = AnsibleModule(argument_spec=argument_spec) @@ -217,6 +283,11 @@ def main(): alias_hosted_zone_id_in = module.params.get('alias_hosted_zone_id') retry_interval_in = module.params.get('retry_interval') private_zone_in = module.params.get('private_zone') + identifier_in = 
module.params.get('identifier') + weight_in = module.params.get('weight') + region_in = module.params.get('region') + health_check_in = module.params.get('health_check') + failover_in = module.params.get('failover') region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module) @@ -249,32 +320,34 @@ def main(): except boto.exception.BotoServerError, e: module.fail_json(msg = e.error_message) - # Get all the existing hosted zones and save their ID's - zones = {} - results = conn.get_all_hosted_zones() - for r53zone in results['ListHostedZonesResponse']['HostedZones']: - # only save this zone id if the private status of the zone matches - # the private_zone_in boolean specified in the params - if module.boolean(r53zone['Config'].get('PrivateZone', False)) == private_zone_in: - zone_id = r53zone['Id'].replace('/hostedzone/', '') - zones[r53zone['Name']] = zone_id + # Find the named zone ID + zone = get_zone_by_name(conn, module, zone_in, private_zone_in) # Verify that the requested zone is already defined in Route53 - if not zone_in in zones: + if zone is None: errmsg = "Zone %s does not exist in Route53" % zone_in module.fail_json(msg = errmsg) record = {} found_record = False - sets = conn.get_all_rrsets(zones[zone_in]) + wanted_rset = Record(name=record_in, type=type_in, ttl=ttl_in, + identifier=identifier_in, weight=weight_in, region=region_in, + health_check=health_check_in, failover=failover_in) + for v in value_list: + if alias_in: + wanted_rset.set_alias(alias_hosted_zone_id_in, v) + else: + wanted_rset.add_value(v) + + sets = conn.get_all_rrsets(zone.id, name=record_in, type=type_in, identifier=identifier_in) for rset in sets: # Due to a bug in either AWS or Boto, "special" characters are returned as octals, preventing round # tripping of things like * and @. 
decoded_name = rset.name.replace(r'\052', '*') decoded_name = decoded_name.replace(r'\100', '@') - if rset.type == type_in and decoded_name.lower() == record_in.lower(): + if rset.type == type_in and decoded_name.lower() == record_in.lower() and rset.identifier == identifier_in: found_record = True record['zone'] = zone_in record['type'] = rset.type @@ -282,6 +355,11 @@ def main(): record['ttl'] = rset.ttl record['value'] = ','.join(sorted(rset.resource_records)) record['values'] = sorted(rset.resource_records) + record['identifier'] = rset.identifier + record['weight'] = rset.weight + record['region'] = rset.region + record['failover'] = rset.failover + record['health_check'] = rset.health_check if rset.alias_dns_name: record['alias'] = True record['value'] = rset.alias_dns_name @@ -291,8 +369,9 @@ def main(): record['alias'] = False record['value'] = ','.join(sorted(rset.resource_records)) record['values'] = sorted(rset.resource_records) - if value_list == sorted(rset.resource_records) and int(record['ttl']) == ttl_in and command_in == 'create': + if command_in == 'create' and rset.to_xml() == wanted_rset.to_xml(): module.exit_json(changed=False) + break if command_in == 'get': module.exit_json(changed=False, set=record) @@ -300,26 +379,16 @@ def main(): if command_in == 'delete' and not found_record: module.exit_json(changed=False) - changes = ResourceRecordSets(conn, zones[zone_in]) - - if command_in == 'create' and found_record: - if not module.params['overwrite']: - module.fail_json(msg = "Record already exists with different value. 
Set 'overwrite' to replace it") - else: - change = changes.add_change("DELETE", record_in, type_in, record['ttl']) - for v in record['values']: - if record['alias']: - change.set_alias(record['alias_hosted_zone_id'], v) - else: - change.add_value(v) + changes = ResourceRecordSets(conn, zone.id) if command_in == 'create' or command_in == 'delete': - change = changes.add_change(command_in.upper(), record_in, type_in, ttl_in) - for v in value_list: - if module.params['alias']: - change.set_alias(alias_hosted_zone_id_in, v) - else: - change.add_value(v) + if command_in == 'create' and found_record: + if not module.params['overwrite']: + module.fail_json(msg = "Record already exists with different value. Set 'overwrite' to replace it") + command = 'UPSERT' + else: + command = command_in.upper() + changes.add_change_record(command, wanted_rset) try: result = commit(changes, retry_interval_in) From 559ad374f573c0dda4c5ecb4cbc7d19a731e9524 Mon Sep 17 00:00:00 2001 From: Michael Weinrich Date: Fri, 19 Jun 2015 17:06:51 +0200 Subject: [PATCH 295/464] Add the option to pass a string as policy --- cloud/amazon/iam_policy.py | 30 +++++++++++++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) diff --git a/cloud/amazon/iam_policy.py b/cloud/amazon/iam_policy.py index f1a6abdd0a6..32a25ae2517 100644 --- a/cloud/amazon/iam_policy.py +++ b/cloud/amazon/iam_policy.py @@ -40,7 +40,12 @@ options: aliases: [] policy_document: description: - - The path to the properly json formatted policy file + - The path to the properly json formatted policy file (mutually exclusive with C(policy_json)) + required: false + aliases: [] + policy_json: + description: + - A properly json formatted policy as string (mutually exclusive with C(policy_document), see https://github.com/ansible/ansible/issues/7005#issuecomment-42894813 on how to use it properly) required: false aliases: [] state: @@ -109,6 +114,19 @@ task: state: present with_items: new_groups.results +# Create a new S3 policy 
with prefix per user +tasks: +- name: Create S3 policy from template + iam_policy: + iam_type: user + iam_name: "{{ item.user }}" + policy_name: "s3_limited_access_{{ item.s3_user_prefix }}" + state: present + policy_json: " {{ lookup( 'template', 's3_policy.json.j2') }} " + with_items: + - user: s3_user + prefix: s3_user_prefix + ''' import json import urllib @@ -271,6 +289,7 @@ def main(): iam_name=dict(default=None, required=False), policy_name=dict(default=None, required=True), policy_document=dict(default=None, required=False), + policy_json=dict(type='str', default=None, required=False), skip_duplicates=dict(type='bool', default=True, required=False) )) @@ -284,10 +303,19 @@ def main(): name = module.params.get('iam_name') policy_name = module.params.get('policy_name') skip = module.params.get('skip_duplicates') + + if module.params.get('policy_document') != None and module.params.get('policy_json') != None: + module.fail_json(msg='Only one of "policy_document" or "policy_json" may be set') + if module.params.get('policy_document') != None: with open(module.params.get('policy_document'), 'r') as json_data: pdoc = json.dumps(json.load(json_data)) json_data.close() + elif module.params.get('policy_json') != None: + try: + pdoc = json.dumps(json.loads(module.params.get('policy_json'))) + except Exception as e: + module.fail_json(msg=str(e) + '\n' + module.params.get('policy_json')) else: pdoc=None From c5324f54e61913a1573b1930fd599921c02319bc Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 24 Jun 2015 06:48:57 -0700 Subject: [PATCH 296/464] Bump amount of file to download in a chunk to 64k. 
--- files/unarchive.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/files/unarchive.py b/files/unarchive.py index 647c218460e..8053991b63d 100644 --- a/files/unarchive.py +++ b/files/unarchive.py @@ -94,6 +94,9 @@ from zipfile import ZipFile # String from tar that shows the tar contents are different from the # filesystem DIFFERENCE_RE = re.compile(r': (.*) differs$') +# When downloading an archive, how much of the archive to download before +# saving to a tempfile (64k) +BUFSIZE = 65536 class UnarchiveError(Exception): pass @@ -282,7 +285,7 @@ def main(): f = open(package, 'w') # Read 1kb at a time to save on ram while True: - data = rsp.read(1024) + data = rsp.read(BUFSIZE) if data == "": break # End of file, break while loop From 4519dd5f4d8fb1787bd81c56403b5fab02075dae Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 24 Jun 2015 06:51:00 -0700 Subject: [PATCH 297/464] Small cleanups. * Import url(lib|parse|lib2) if needed by the module rather than relying on module_utils.urls to do so. 
* Remove stdlib modules from requirements * Use the if __name__ conditional for invoking main() --- network/basics/get_url.py | 7 +++++-- packaging/os/rpm_key.py | 6 ++++-- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/network/basics/get_url.py b/network/basics/get_url.py index 074bf8bb484..f7ea5008cee 100644 --- a/network/basics/get_url.py +++ b/network/basics/get_url.py @@ -113,7 +113,7 @@ options: - all arguments accepted by the M(file) module also work here required: false # informational: requirements for nodes -requirements: [ urllib2, urlparse ] +requirements: [ ] author: "Jan-Piet Mens (@jpmens)" ''' @@ -125,6 +125,8 @@ EXAMPLES=''' get_url: url=http://example.com/path/file.conf dest=/etc/foo.conf sha256sum=b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c ''' +import urlparse + try: import hashlib HAS_HASHLIB=True @@ -315,4 +317,5 @@ def main(): # import module snippets from ansible.module_utils.basic import * from ansible.module_utils.urls import * -main() +if __name__ == '__main__': + main() diff --git a/packaging/os/rpm_key.py b/packaging/os/rpm_key.py index 1b38da3823b..d2d5e684015 100644 --- a/packaging/os/rpm_key.py +++ b/packaging/os/rpm_key.py @@ -60,9 +60,10 @@ EXAMPLES = ''' # Example action to ensure a key is not present in the db - rpm_key: state=absent key=DEADB33F ''' +import re import syslog import os.path -import re +import urllib2 import tempfile def is_pubkey(string): @@ -203,4 +204,5 @@ def main(): # import module snippets from ansible.module_utils.basic import * from ansible.module_utils.urls import * -main() +if __name__ == '__main__': + main() From 73d5a8a63a9f250da0c867fb2efb927b3b91c183 Mon Sep 17 00:00:00 2001 From: Evan Carter Date: Wed, 24 Jun 2015 11:05:37 -0400 Subject: [PATCH 298/464] Fixing typo --- cloud/amazon/ec2_lc.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_lc.py b/cloud/amazon/ec2_lc.py index 6c5e2c1dd4c..818e8efbb50 100644 --- 
a/cloud/amazon/ec2_lc.py +++ b/cloud/amazon/ec2_lc.py @@ -122,7 +122,7 @@ options: required: false default: null version_added: "2.0" - classic_link_vpc_security_groups" + classic_link_vpc_security_groups: description: - A list of security group id’s with which to associate the ClassicLink VPC instances. required: false From dba3bc75399ce520de6eb96b0a93c829114580d9 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 24 Jun 2015 08:12:49 -0700 Subject: [PATCH 299/464] Read the url in in chunks so that we don't use as much memory for large packages --- packaging/os/yum.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/packaging/os/yum.py b/packaging/os/yum.py index 22e7ca4ad71..14339b4c18b 100644 --- a/packaging/os/yum.py +++ b/packaging/os/yum.py @@ -152,6 +152,9 @@ EXAMPLES = ''' yum: name="@Development tools" state=present ''' +# 64k. Number of bytes to read at a time when manually downloading pkgs via a url +BUFSIZE = 65536 + def_qf = "%{name}-%{version}-%{release}.%{arch}" def log(msg): @@ -526,9 +529,11 @@ def install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): package = os.path.join(tempdir, str(pkg.rsplit('/', 1)[1])) try: rsp, info = fetch_url(module, pkg) - data = rsp.read() f = open(package, 'w') - f.write(data) + data = rsp.read(BUFSIZE) + while data: + f.write(data) + data = rsp.read(BUFSIZE) f.close() pkg = package except Exception, e: From dbed8cee3bf81b0482ed6ba611e45fd6f73a5381 Mon Sep 17 00:00:00 2001 From: Jay Taylor Date: Fri, 24 Apr 2015 14:26:37 -0700 Subject: [PATCH 300/464] Added support for spot request type specification (to support persistent spot requests). 
--- cloud/amazon/ec2.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index 6d47fa6ac32..6fb6f4a5417 100644 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -76,6 +76,13 @@ options: required: false default: null aliases: [] + spot_type: + description: + - Type of spot request; one of "one-time" or "persistent". Defaults to "one-time" if not supplied. + required: false + default: "one-time" + choices: [ "one-time", "persistent" ] + aliases: [] image: description: - I(ami) ID to use for the instance @@ -783,6 +790,7 @@ def create_instances(module, ec2, vpc, override_count=None): instance_type = module.params.get('instance_type') tenancy = module.params.get('tenancy') spot_price = module.params.get('spot_price') + spot_type = module.params.get('spot_type') image = module.params.get('image') if override_count: count = override_count @@ -976,6 +984,7 @@ def create_instances(module, ec2, vpc, override_count=None): params.update(dict( count = count_remaining, + type = spot_type, )) res = ec2.request_spot_instances(spot_price, **params) @@ -1220,6 +1229,7 @@ def main(): zone = dict(aliases=['aws_zone', 'ec2_zone']), instance_type = dict(aliases=['type']), spot_price = dict(), + spot_type = dict(default='one-time'), image = dict(), kernel = dict(), count = dict(type='int', default='1'), From 6611ee34a59c9b048d68933644dd0a2f1039574a Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 24 Jun 2015 11:23:34 -0700 Subject: [PATCH 301/464] Fix for when the password file did not exist previously --- web_infrastructure/htpasswd.py | 45 ++++++++++++++++++++-------------- 1 file changed, 26 insertions(+), 19 deletions(-) diff --git a/web_infrastructure/htpasswd.py b/web_infrastructure/htpasswd.py index 274f8fa38b2..bfb525b67eb 100644 --- a/web_infrastructure/htpasswd.py +++ b/web_infrastructure/htpasswd.py @@ -78,6 +78,7 @@ EXAMPLES = """ import os +import tempfile from distutils.version import StrictVersion try: 
@@ -199,28 +200,34 @@ def main(): module.fail_json(msg="This module requires the passlib Python library") # Check file for blank lines in effort to avoid "need more than 1 value to unpack" error. - f = open(path, "r") try: - lines=f.readlines() - finally: - f.close - - # If the file gets edited, it returns true, so only edit the file if it has blank lines - strip = False - for line in lines: - if not line.strip(): - strip = True - - if strip: - # If check mode, create a temporary file - if check_mode: - temp = tempfile.NamedTemporaryFile() - path = temp.name - f = open(path,"w") + f = open(path, "r") + except IOError: + # No preexisting file to remove blank lines from + f = None + else: try: - [f.write(line) for line in lines if line.strip() ] + lines = f.readlines() finally: - f.close + f.close() + + # If the file gets edited, it returns true, so only edit the file if it has blank lines + strip = False + for line in lines: + if not line.strip(): + strip = True + break + + if strip: + # If check mode, create a temporary file + if check_mode: + temp = tempfile.NamedTemporaryFile() + path = temp.name + f = open(path, "w") + try: + [ f.write(line) for line in lines if line.strip() ] + finally: + f.close() try: if state == 'present': From 65c63b3afa9343b65ea3c919b632443cd5e7eade Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 24 Jun 2015 14:43:04 -0400 Subject: [PATCH 302/464] Updating version_added for new spot_type param in ec2 module Also made sure 'choices' were set on the module param, to catch errors in user's playbooks, etc. --- cloud/amazon/ec2.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index 6fb6f4a5417..b79395fb3a1 100644 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -77,6 +77,7 @@ options: default: null aliases: [] spot_type: + version_added: "2.0" description: - Type of spot request; one of "one-time" or "persistent". Defaults to "one-time" if not supplied. 
required: false @@ -1229,7 +1230,7 @@ def main(): zone = dict(aliases=['aws_zone', 'ec2_zone']), instance_type = dict(aliases=['type']), spot_price = dict(), - spot_type = dict(default='one-time'), + spot_type = dict(default='one-time', choices=["one-time", "persistent"]), image = dict(), kernel = dict(), count = dict(type='int', default='1'), From 00322c43fc7095e926fff25837343cb700b6a9a2 Mon Sep 17 00:00:00 2001 From: Patrick Roby Date: Fri, 14 Nov 2014 16:07:29 -0800 Subject: [PATCH 303/464] Add support for listing keys in a specific S3 bucket Includes support for specifying a prefix, marker, and/or max_keys. Returns a list of key names (as strings). --- cloud/amazon/s3.py | 39 +++++++++++++++++++++++++++++++++++++-- 1 file changed, 37 insertions(+), 2 deletions(-) diff --git a/cloud/amazon/s3.py b/cloud/amazon/s3.py index 9bec312294a..ecf35d00f5d 100644 --- a/cloud/amazon/s3.py +++ b/cloud/amazon/s3.py @@ -64,7 +64,7 @@ options: version_added: "1.6" mode: description: - - Switches the module behaviour between put (upload), get (download), geturl (return download url (Ansible 1.3+), getstr (download object as string (1.3+)), create (bucket), delete (bucket), and delobj (delete object). + - Switches the module behaviour between put (upload), get (download), geturl (return download url (Ansible 1.3+), getstr (download object as string (1.3+)), list (list keys), create (bucket), delete (bucket), and delobj (delete object). 
required: true default: null aliases: [] @@ -129,6 +129,12 @@ EXAMPLES = ''' # PUT/upload with metadata - s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put metadata='Content-Encoding=gzip,Cache-Control=no-cache' +# List keys simple +- s3: bucket=mybucket mode=list + +# List keys all options +- s3: bucket=mybucket mode=list prefix=/my/desired/ marker=/my/desired/0023.txt max_keys=472 + # Create an empty bucket - s3: bucket=mybucket mode=create @@ -204,6 +210,19 @@ def create_bucket(module, s3, bucket, location=None): if bucket: return True +def get_bucket(module, s3, bucket): + try: + return s3.lookup(bucket) + except s3.provider.storage_response_error, e: + module.fail_json(msg= str(e)) + +def list_keys(module, bucket_object, prefix, marker, max_keys): + all_keys = bucket_object.get_all_keys(prefix=prefix, marker=marker, max_keys=max_keys) + + keys = map((lambda x: x.key), all_keys) + + module.exit_json(msg="LIST operation complete", s3_keys=keys) + def delete_bucket(module, s3, bucket): try: bucket = s3.lookup(bucket) @@ -329,11 +348,14 @@ def main(): dest = dict(default=None), encrypt = dict(default=True, type='bool'), expiry = dict(default=600, aliases=['expiration']), + marker = dict(default=None), + max_keys = dict(default=1000), metadata = dict(type='dict'), - mode = dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj'], required=True), + mode = dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list'], required=True), object = dict(), version = dict(default=None), overwrite = dict(aliases=['force'], default='always'), + prefix = dict(default=None), retries = dict(aliases=['retry'], type='int', default=0), s3_url = dict(aliases=['S3_URL']), src = dict(), @@ -349,11 +371,14 @@ def main(): expiry = int(module.params['expiry']) if module.params.get('dest'): dest = os.path.expanduser(module.params.get('dest')) + marker = module.params.get('marker') + max_keys = 
module.params.get('max_keys') metadata = module.params.get('metadata') mode = module.params.get('mode') obj = module.params.get('object') version = module.params.get('version') overwrite = module.params.get('overwrite') + prefix = module.params.get('prefix') retries = module.params.get('retries') s3_url = module.params.get('s3_url') src = module.params.get('src') @@ -537,6 +562,16 @@ def main(): else: module.fail_json(msg="Bucket parameter is required.", failed=True) + # Support for listing a set of keys + if mode == 'list': + bucket_object = get_bucket(module, s3, bucket) + + # If the bucket does not exist then bail out + if bucket_object is None: + module.fail_json(msg="Target bucket (%s) cannot be found"% bucket, failed=True) + + list_keys(module, bucket_object, prefix, marker, max_keys) + # Need to research how to create directories without "populating" a key, so this should just do bucket creation for now. # WE SHOULD ENABLE SOME WAY OF CREATING AN EMPTY KEY TO CREATE "DIRECTORY" STRUCTURE, AWS CONSOLE DOES THIS. 
if mode == 'create': From 50912c9092eb567c5dc61c47eecd2ccc585ae364 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 24 Jun 2015 16:32:47 -0700 Subject: [PATCH 304/464] Fix apt_repository so that it does not modify the mode of existing repositories --- packaging/os/apt_repository.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/packaging/os/apt_repository.py b/packaging/os/apt_repository.py index 496f5c5e269..eee58f77729 100644 --- a/packaging/os/apt_repository.py +++ b/packaging/os/apt_repository.py @@ -126,6 +126,8 @@ class InvalidSource(Exception): class SourcesList(object): def __init__(self): self.files = {} # group sources by file + # Repositories that we're adding -- used to implement mode param + self.new_repos = set() self.default_file = self._apt_cfg_file('Dir::Etc::sourcelist') # read sources.list if it exists @@ -257,8 +259,9 @@ class SourcesList(object): module.atomic_move(tmp_path, filename) # allow the user to override the default mode - this_mode = module.params['mode'] - module.set_mode_if_different(filename, this_mode, False) + if filename in self.new_repos: + this_mode = module.params['mode'] + module.set_mode_if_different(filename, this_mode, False) else: del self.files[filename] if os.path.exists(filename): @@ -300,6 +303,7 @@ class SourcesList(object): files = self.files[file] files.append((len(files), True, True, source_new, comment_new)) + self.new_repos.add(file) def add_source(self, line, comment='', file=None): source = self._parse(line, raise_if_invalid_or_disabled=True)[2] From 964d73172207df628cc7e8cfa9c9782d28f93fa9 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 25 Jun 2015 08:29:42 -0700 Subject: [PATCH 305/464] Add version_added documentation to log_driver parameter --- cloud/docker/docker.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index c50f5f53e32..9986c94f9ec 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ 
-108,6 +108,7 @@ options: - json-file - none - syslog + version_added: "2.0" memory_limit: description: - RAM allocated to the container as a number of bytes or as a human-readable From bed420cd531c30c0865bf331c74035494b612a1e Mon Sep 17 00:00:00 2001 From: David Shrewsbury Date: Thu, 25 Jun 2015 12:19:20 -0400 Subject: [PATCH 306/464] Update os_keypair for latest shade Uses the latest version of shade for cleaner code. Also, always return the key dict whether we create the key, or it already exists. The example using public_key_file is corrected to use a full path since ~ is not converted for us. --- cloud/openstack/os_keypair.py | 80 +++++++++++++++++++---------------- 1 file changed, 44 insertions(+), 36 deletions(-) diff --git a/cloud/openstack/os_keypair.py b/cloud/openstack/os_keypair.py index b404e6cc02a..a9c2640628f 100644 --- a/cloud/openstack/os_keypair.py +++ b/cloud/openstack/os_keypair.py @@ -41,12 +41,14 @@ options: default: None public_key: description: - - The public key that would be uploaded to nova and injected to vm's upon creation + - The public key that would be uploaded to nova and injected into VMs + upon creation. required: false default: None public_key_file: description: - - Path to local file containing ssh public key. Mutually exclusive with public_key + - Path to local file containing ssh public key. Mutually exclusive + with public_key. required: false default: None state: @@ -63,7 +65,7 @@ EXAMPLES = ''' cloud: mordred state: present name: ansible_key - public_key_file: ~/.ssh/id_rsa.pub + public_key_file: /home/me/.ssh/id_rsa.pub # Creates a new key pair and the private key returned after the run. 
- os_keypair: @@ -73,16 +75,33 @@ EXAMPLES = ''' ''' +def _system_state_change(module, keypair): + state = module.params['state'] + if state == 'present' and not keypair: + return True + if state == 'absent' and keypair: + return True + return False + + def main(): argument_spec = openstack_full_argument_spec( name = dict(required=True), public_key = dict(default=None), public_key_file = dict(default=None), - state = dict(default='present', choices=['absent', 'present']), + state = dict(default='present', + choices=['absent', 'present']), ) + module_kwargs = openstack_module_kwargs( mutually_exclusive=[['public_key', 'public_key_file']]) - module = AnsibleModule(argument_spec, **module_kwargs) + + module = AnsibleModule(argument_spec, + supports_check_mode=True, + **module_kwargs) + + if not HAS_SHADE: + module.fail_json(msg='shade is required for this module') state = module.params['state'] name = module.params['name'] @@ -90,44 +109,33 @@ def main(): if module.params['public_key_file']: public_key = open(module.params['public_key_file']).read() - - if not HAS_SHADE: - module.fail_json(msg='shade is required for this module') + public_key = public_key.rstrip() try: cloud = shade.openstack_cloud(**module.params) + keypair = cloud.get_keypair(name) + + if module.check_mode: + module.exit_json(changed=_system_state_change(module, keypair)) if state == 'present': - for key in cloud.list_keypairs(): - if key.name == name: - if public_key and (public_key != key.public_key): - module.fail_json( - msg="Key name %s present but key hash not the same" - " as offered. Delete key first." 
% key.name - ) - else: - module.exit_json(changed=False, result="Key present") - try: - key = cloud.create_keypair(name, public_key) - except Exception, e: - module.exit_json( - msg="Error in creating the keypair: %s" % e.message - ) - if not public_key: - module.exit_json(changed=True, key=key.private_key) - module.exit_json(changed=True, key=None) + if keypair and keypair['name'] == name: + if public_key and (public_key != keypair['public_key']): + module.fail_json( + msg="Key name %s present but key hash not the same" + " as offered. Delete key first." % name + ) + else: + module.exit_json(changed=False, key=keypair) + + new_key = cloud.create_keypair(name, public_key) + module.exit_json(changed=True, key=new_key) elif state == 'absent': - for key in cloud.list_keypairs(): - if key.name == name: - try: - cloud.delete_keypair(name) - except Exception, e: - module.fail_json( - msg="Keypair deletion has failed: %s" % e.message - ) - module.exit_json(changed=True, result="deleted") - module.exit_json(changed=False, result="not present") + if keypair: + cloud.delete_keypair(name) + module.exit_json(changed=True) + module.exit_json(changed=False) except shade.OpenStackCloudException as e: module.fail_json(msg=e.message) From 280ccfbb78e5e80c6f820ec371a848ff48fe9913 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 25 Jun 2015 09:28:39 -0700 Subject: [PATCH 307/464] Add note about redirects proxies Fixes #1574 --- network/basics/get_url.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/network/basics/get_url.py b/network/basics/get_url.py index f7ea5008cee..64cd24b6d09 100644 --- a/network/basics/get_url.py +++ b/network/basics/get_url.py @@ -38,6 +38,8 @@ description: (see `setting the environment `_), or by using the use_proxy option. + - HTTP redirects can redirect from HTTP to HTTPS so you should be sure that + your proxy environment for both protocols is correct. 
version_added: "0.6" options: url: From 892212b9c434bb4d3f8c0a788f9155284d50f209 Mon Sep 17 00:00:00 2001 From: Juan Picca Date: Thu, 12 Feb 2015 09:25:36 -0200 Subject: [PATCH 308/464] synchronize module: add partial option --- files/synchronize.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/files/synchronize.py b/files/synchronize.py index 7f934e4e6f4..761529742de 100644 --- a/files/synchronize.py +++ b/files/synchronize.py @@ -152,6 +152,12 @@ options: default: required: false version_added: "1.6" + partial: + description: + - Tells rsync to keep the partial file which should make a subsequent transfer of the rest of the file much faster. + default: no + required: false + version_added: "1.9" notes: - rsync must be installed on both the local and remote machine. - Inspect the verbose output to validate the destination user/host/path @@ -237,6 +243,7 @@ def main(): rsync_timeout = dict(type='int', default=0), rsync_opts = dict(type='list'), ssh_args = dict(type='str'), + partial = dict(default='no', type='bool'), ), supports_check_mode = True ) @@ -254,6 +261,7 @@ def main(): compress = module.params['compress'] existing_only = module.params['existing_only'] dirs = module.params['dirs'] + partial = module.params['partial'] # the default of these params depends on the value of archive recursive = module.params['recursive'] links = module.params['links'] @@ -332,6 +340,9 @@ def main(): if rsync_opts: cmd = cmd + " " + " ".join(rsync_opts) + if partial: + cmd = cmd + " --partial" + changed_marker = '<>' cmd = cmd + " --out-format='" + changed_marker + "%i %n%L'" From 8ba96aaf4bb5c7e3534408be693ead01c4c49027 Mon Sep 17 00:00:00 2001 From: Patrick Roby Date: Thu, 25 Jun 2015 10:36:07 -0700 Subject: [PATCH 309/464] update documentation, adding new params --- cloud/amazon/s3.py | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/cloud/amazon/s3.py b/cloud/amazon/s3.py index ecf35d00f5d..4edac74366b 100644 --- 
a/cloud/amazon/s3.py +++ b/cloud/amazon/s3.py @@ -56,6 +56,18 @@ options: required: false default: 600 aliases: [] + marker: + description: + - Specifies the key to start with when using list mode. Object keys are returned in alphabetical order, starting with key after the marker in order. + required: false + default: null + version_added: "2.0" + max_keys: + description: + - Max number of results to return in list mode, set this if you want to retrieve fewer than the default 1000 keys. + required: false + default: 1000 + version_added: "2.0" metadata: description: - Metadata for PUT operation, as a dictionary of 'key=value' and 'key=value,key=value'. @@ -64,7 +76,7 @@ options: version_added: "1.6" mode: description: - - Switches the module behaviour between put (upload), get (download), geturl (return download url (Ansible 1.3+), getstr (download object as string (1.3+)), list (list keys), create (bucket), delete (bucket), and delobj (delete object). + - Switches the module behaviour between put (upload), get (download), geturl (return download url (Ansible 1.3+), getstr (download object as string (1.3+)), list (list keys (2.0+)), create (bucket), delete (bucket), and delobj (delete object). required: true default: null aliases: [] @@ -73,6 +85,12 @@ options: - Keyname of the object inside the bucket. Can be used to create "virtual directories", see examples. required: false default: null + prefix: + description: + - Limits the response to keys that begin with the specified prefix for list mode + required: false + default: null + version_added: "2.0" version: description: - Version ID of the object inside the bucket. Can be used to get a specific version of a file if versioning is enabled in the target bucket. 
From e90d02c35cfacda523eebdecdac14cc3194dc04d Mon Sep 17 00:00:00 2001 From: Patrick Roby Date: Thu, 25 Jun 2015 10:37:17 -0700 Subject: [PATCH 310/464] iterate through all keys in a more pythonic manner --- cloud/amazon/s3.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/s3.py b/cloud/amazon/s3.py index 4edac74366b..8c5221e3c1f 100644 --- a/cloud/amazon/s3.py +++ b/cloud/amazon/s3.py @@ -237,7 +237,7 @@ def get_bucket(module, s3, bucket): def list_keys(module, bucket_object, prefix, marker, max_keys): all_keys = bucket_object.get_all_keys(prefix=prefix, marker=marker, max_keys=max_keys) - keys = map((lambda x: x.key), all_keys) + keys = [x.key for x in all_keys] module.exit_json(msg="LIST operation complete", s3_keys=keys) From d435d5ce0ae2597fdde4600dd07edbb8c9c4fdfe Mon Sep 17 00:00:00 2001 From: verm666 Date: Thu, 25 Jun 2015 10:56:29 -0700 Subject: [PATCH 311/464] This change is in response to issue #133. The original problem is: apt_repository.py connect to launchpad on every playbook run. In this patch apt_repository.py checks if required repository already exists or not. If no - paa will be added, if yes - just skip actions. 
--- packaging/os/apt_repository.py | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/packaging/os/apt_repository.py b/packaging/os/apt_repository.py index eee58f77729..8f6d18d09d5 100644 --- a/packaging/os/apt_repository.py +++ b/packaging/os/apt_repository.py @@ -378,6 +378,25 @@ class UbuntuSourcesList(SourcesList): source = self._parse(line, raise_if_invalid_or_disabled=True)[2] self._remove_valid_source(source) + @property + def repos_urls(self): + _repositories = [] + for parsed_repos in self.files.values(): + for parsed_repo in parsed_repos: + enabled = parsed_repo[1] + source_line = parsed_repo[3] + + if not enabled: + continue + + if source_line.startswith('ppa:'): + source, ppa_owner, ppa_name = self._expand_ppa(i[3]) + _repositories.append(source) + else: + _repositories.append(source_line) + + return _repositories + def get_add_ppa_signing_key_callback(module): def _run_command(command): @@ -425,8 +444,13 @@ def main(): sources_before = sourceslist.dump() + if repo.startswith('ppa:'): + expanded_repo = sourceslist._expand_ppa(repo)[0] + else: + expanded_repo = repo + try: - if state == 'present': + if state == 'present' and expanded_repo not in sourceslist.repos_urls: sourceslist.add_source(repo) elif state == 'absent': sourceslist.remove_source(repo) From 2206477b739f767215ae0dadf4dea6e5cf36168f Mon Sep 17 00:00:00 2001 From: Vladimir Martsul Date: Fri, 26 Jun 2015 01:40:58 +0600 Subject: [PATCH 312/464] Add "force" description Add "force" option description --- files/template.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/files/template.py b/files/template.py index 2feb599abdf..a1dc72c27bd 100644 --- a/files/template.py +++ b/files/template.py @@ -47,6 +47,14 @@ options: required: false default: "" version_added: "1.2" + force: + description: + - the default is C(yes), which will replace the remote file when contents + are different than the source. 
If C(no), the file will only be transferred + if the destination does not exist. + required: false + choices: [ "yes", "no" ] + default: "yes" notes: - "Since Ansible version 0.9, templates are loaded with C(trim_blocks=True)." requirements: [] From a5bba2488f2775da8cb08f4b9fb3f3c5d230ad2b Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 25 Jun 2015 12:44:08 -0700 Subject: [PATCH 313/464] Update version_added to 2.0 for the partial option --- files/synchronize.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/synchronize.py b/files/synchronize.py index 761529742de..abad5ad359f 100644 --- a/files/synchronize.py +++ b/files/synchronize.py @@ -157,7 +157,7 @@ options: - Tells rsync to keep the partial file which should make a subsequent transfer of the rest of the file much faster. default: no required: false - version_added: "1.9" + version_added: "2.0" notes: - rsync must be installed on both the local and remote machine. - Inspect the verbose output to validate the destination user/host/path From 29e4a127e19fee326c5c698d249f6b9791b9e705 Mon Sep 17 00:00:00 2001 From: Ash Wilson Date: Thu, 25 Jun 2015 17:11:38 -0500 Subject: [PATCH 314/464] Default net to 'bridge' in container diff This prevents an unnecessary reload when the `net` parameter is unspecified (i.e. almost always). 
--- cloud/docker/docker.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 9986c94f9ec..a6090c4b0c1 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -1108,8 +1108,8 @@ class DockerManager(object): # NETWORK MODE - expected_netmode = self.module.params.get('net') or '' - actual_netmode = container['HostConfig']['NetworkMode'] + expected_netmode = self.module.params.get('net') or 'bridge' + actual_netmode = container['HostConfig']['NetworkMode'] or 'bridge' if actual_netmode != expected_netmode: self.reload_reasons.append('net ({0} => {1})'.format(actual_netmode, expected_netmode)) differing.append(container) From dc3161dfdd24c2a43806e4783c9c86f8fd6b72f0 Mon Sep 17 00:00:00 2001 From: Simon Olofsson Date: Thu, 20 Nov 2014 00:21:01 +0100 Subject: [PATCH 315/464] Add option docker_user for docker module. docker_user can be used to specify the user or UID to use within the container. --- cloud/docker/docker.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 9986c94f9ec..e723a14f0fb 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -160,6 +160,13 @@ options: specified by docker-py. default: docker-py default remote API version version_added: "1.8" + docker_user: + description: + - Username or UID to use within the container + required: false + default: + aliases: [] + version_added: "2.0" username: description: - Remote API username. 
@@ -1303,6 +1310,7 @@ class DockerManager(object): 'stdin_open': self.module.params.get('stdin_open'), 'tty': self.module.params.get('tty'), 'host_config': self.create_host_config(), + 'user': self.module.params.get('docker_user'), } def do_create(count, params): @@ -1495,6 +1503,7 @@ def main(): tls_ca_cert = dict(required=False, default=None, type='str'), tls_hostname = dict(required=False, type='str', default=None), docker_api_version = dict(required=False, default=DEFAULT_DOCKER_API_VERSION, type='str'), + docker_user = dict(default=None), username = dict(default=None), password = dict(), email = dict(), From e3d9b51cbb954a43f138ea02a9f9311ff7555ca9 Mon Sep 17 00:00:00 2001 From: Davide Guerri Date: Fri, 26 Jun 2015 12:53:20 +0100 Subject: [PATCH 316/464] Update os_floating_ip with new shade methods --- cloud/openstack/os_floating_ip.py | 283 +++++++++++++----------------- 1 file changed, 118 insertions(+), 165 deletions(-) diff --git a/cloud/openstack/os_floating_ip.py b/cloud/openstack/os_floating_ip.py index 2d939a9bcd7..9755b1d4159 100644 --- a/cloud/openstack/os_floating_ip.py +++ b/cloud/openstack/os_floating_ip.py @@ -1,8 +1,6 @@ #!/usr/bin/python -# coding: utf-8 -*- - -# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. -# Copyright (c) 2013, Benno Joy +# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. +# Author: Davide Guerri # # This module is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -17,9 +15,10 @@ # You should have received a copy of the GNU General Public License # along with this software. If not, see . - try: import shade + from shade import meta + HAS_SHADE = True except ImportError: HAS_SHADE = False @@ -38,19 +37,39 @@ options: - The name or ID of the instance to which the IP address should be assigned. required: true - network_name: + network: description: - - Name of the network from which IP has to be assigned to VM. 
- Please make sure the network is an external network. - - Required if ip_address is not given. - required: true - default: None - internal_network_name: + - The name or ID of a neutron external network or a nova pool name. + required: false + floating_ip_address: + description: + - A floating IP address to attach or to detach. Required only if state + is absent. When state is present can be used to specify a IP address + to attach. + required: false + reuse: + description: + - When state is present, and floating_ip_address is not present, + this parameter can be used to specify whether we should try to reuse + a floating IP address already allocated to the project. + required: false + default: false + fixed_address: + description: + - To which fixed IP of server the floating IP address should be + attached to. + required: false + wait: description: - - Name of the network of the port to associate with the floating ip. - Necessary when VM multiple networks. + - When attaching a floating IP address, specify whether we should + wait for it to appear as attached. required: false - default: None + default false + timeout: + description: + - Time to wait for an IP address to appear as attached. See wait. + required: false + default 60 state: description: - Should the resource be present or absent. @@ -61,136 +80,54 @@ requirements: ["shade"] ''' EXAMPLES = ''' -# Assign a floating ip to the instance from an external network +# Assign a floating IP to the fist interface of `cattle001` from an exiting +# external network or nova pool. If a free floating IP is already allocated to +# the project, it is reused; if not, a new one is created. - os_floating_ip: - cloud: mordred + cloud: dguerri + server: cattle001 + +# Assign a new floating IP to the instance fixed ip `192.0.2.3` of +# `cattle001`. A new floating IP from the external network (or nova pool) +# ext_net is created. 
+- os_floating_ip: + cloud: dguerri state: present - server: vm1 - network_name: external_network - internal_network_name: internal_network + reuse: false + server: cattle001 + network: ext_net + fixed_address: 192.0.2.3 + wait: true + timeout: 180 + +# Detach a floating IP address from a server +- os_floating_ip: + cloud: dguerri + state: absent + floating_ip_address: 203.0.113.2 + server: cattle001 ''' -def _get_server_state(module, cloud): - info = None - server = cloud.get_server(module.params['server']) - if server: - info = server._info - status = info['status'] - if status != 'ACTIVE' and module.params['state'] == 'present': - module.fail_json( - msg="The VM is available but not Active. State: %s" % status - ) - return info, server - - -def _get_port_info(neutron, module, instance_id, internal_network_name=None): - subnet_id = None - if internal_network_name: - kwargs = {'name': internal_network_name} - networks = neutron.list_networks(**kwargs) - network_id = networks['networks'][0]['id'] - kwargs = { - 'network_id': network_id, - 'ip_version': 4 - } - subnets = neutron.list_subnets(**kwargs) - subnet_id = subnets['subnets'][0]['id'] - - kwargs = { - 'device_id': instance_id, - } - try: - ports = neutron.list_ports(**kwargs) - except Exception, e: - module.fail_json(msg="Error in listing ports: %s" % e.message) - - if subnet_id: - port = next(port for port in ports['ports'] if port['fixed_ips'][0]['subnet_id'] == subnet_id) - port_id = port['id'] - fixed_ip_address = port['fixed_ips'][0]['ip_address'] - else: - port_id = ports['ports'][0]['id'] - fixed_ip_address = ports['ports'][0]['fixed_ips'][0]['ip_address'] - - if not ports['ports']: - return None, None - return fixed_ip_address, port_id - - -def _get_floating_ip(neutron, module, fixed_ip_address): - kwargs = { - 'fixed_ip_address': fixed_ip_address - } - try: - ips = neutron.list_floatingips(**kwargs) - except Exception, e: - module.fail_json( - msg="Error in fetching the floatingips's %s" % e.message 
- ) - - if not ips['floatingips']: - return None, None - - return (ips['floatingips'][0]['id'], - ips['floatingips'][0]['floating_ip_address']) - - -def _create_and_associate_floating_ip(neutron, module, port_id, - net_id, fixed_ip): - kwargs = { - 'port_id': port_id, - 'floating_network_id': net_id, - 'fixed_ip_address': fixed_ip - } - - try: - result = neutron.create_floatingip({'floatingip': kwargs}) - except Exception, e: - module.fail_json( - msg="Error in updating the floating ip address: %s" % e.message - ) - - module.exit_json( - changed=True, - result=result, - public_ip=result['floatingip']['floating_ip_address'] - ) - - -def _get_public_net_id(neutron, module): - kwargs = { - 'name': module.params['network_name'], - } - try: - networks = neutron.list_networks(**kwargs) - except Exception, e: - module.fail_json("Error in listing neutron networks: %s" % e.message) - if not networks['networks']: +def _get_floating_ip(cloud, floating_ip_address): + f_ips = cloud.search_floating_ips( + filters={'floating_ip_address': floating_ip_address}) + if not f_ips: return None - return networks['networks'][0]['id'] - -def _update_floating_ip(neutron, module, port_id, floating_ip_id): - kwargs = { - 'port_id': port_id - } - try: - result = neutron.update_floatingip(floating_ip_id, - {'floatingip': kwargs}) - except Exception, e: - module.fail_json( - msg="Error in updating the floating ip address: %s" % e.message - ) - module.exit_json(changed=True, result=result) + return f_ips[0] def main(): argument_spec = openstack_full_argument_spec( - server = dict(required=True), - network_name = dict(required=True), - internal_network_name = dict(default=None), - state = dict(default='present', choices=['absent', 'present']), + server=dict(required=True), + state=dict(default='present', choices=['absent', 'present']), + network=dict(required=False), + floating_ip_address=dict(required=False), + reuse=dict(required=False, type='bool', default=False), + 
fixed_address=dict(required=False), + wait=dict(required=False, type='bool', default=False), + timeout=dict(required=False, type='int', default=60), ) module_kwargs = openstack_module_kwargs() @@ -199,47 +136,63 @@ def main(): if not HAS_SHADE: module.fail_json(msg='shade is required for this module') + server_name_or_id = module.params['server'] state = module.params['state'] - internal_network_name = module.params['internal_network_name'] + network = module.params['network'] + floating_ip_address = module.params['floating_ip_address'] + reuse = module.params['reuse'] + fixed_address = module.params['fixed_address'] + wait = module.params['wait'] + timeout = module.params['timeout'] - try: - cloud = shade.openstack_cloud(**module.params) - neutron = cloud.neutron_client - - server_info, server_obj = _get_server_state(module, cloud) - if not server_info: - module.fail_json(msg="The server provided cannot be found") - - fixed_ip, port_id = _get_port_info( - neutron, module, server_info['id'], internal_network_name) - if not port_id: - module.fail_json(msg="Cannot find a port for this instance," - " maybe fixed ip is not assigned") + cloud = shade.openstack_cloud(**module.params) - floating_id, floating_ip = _get_floating_ip(neutron, module, fixed_ip) + try: + server = cloud.get_server(server_name_or_id) + if server is None: + module.fail_json( + msg="server {0} not found".format(server_name_or_id)) if state == 'present': - if floating_ip: - # This server already has a floating IP assigned - module.exit_json(changed=False, public_ip=floating_ip) - - pub_net_id = _get_public_net_id(neutron, module) - if not pub_net_id: - module.fail_json( - msg="Cannot find the public network specified" - ) - _create_and_associate_floating_ip(neutron, module, port_id, - pub_net_id, fixed_ip) + if floating_ip_address is None: + if reuse: + f_ip = cloud.available_floating_ip(network=network) + else: + f_ip = cloud.create_floating_ip(network=network) + else: + f_ip = 
_get_floating_ip(cloud, floating_ip_address) + if f_ip is None: + module.fail_json( + msg="floating IP {0} not found".format( + floating_ip_address)) + + cloud.attach_ip_to_server( + server_id=server['id'], floating_ip_id=f_ip['id'], + fixed_address=fixed_address, wait=wait, timeout=timeout) + # Update the floating IP status + f_ip = cloud.get_floating_ip(id=f_ip['id']) + module.exit_json(changed=True, floating_ip=f_ip) elif state == 'absent': - if floating_ip: - _update_floating_ip(neutron, module, None, floating_id) - module.exit_json(changed=False) + if floating_ip_address is None: + module.fail_json(msg="floating_ip_address is required") + + f_ip = _get_floating_ip(cloud, floating_ip_address) + + cloud.detach_ip_from_server( + server_id=server['id'], floating_ip_id=f_ip['id']) + # Update the floating IP status + f_ip = cloud.get_floating_ip(id=f_ip['id']) + module.exit_json(changed=True, floating_ip=f_ip) except shade.OpenStackCloudException as e: - module.fail_json(msg=e.message) + module.fail_json(msg=e.message, extra_data=e.extra_data) + # this is magic, see lib/ansible/module_common.py from ansible.module_utils.basic import * from ansible.module_utils.openstack import * -main() + + +if __name__ == '__main__': + main() From 725a7b2f59a296467439edde5aab75dc9552e60d Mon Sep 17 00:00:00 2001 From: verm666 Date: Fri, 26 Jun 2015 05:49:59 -0700 Subject: [PATCH 317/464] unarchive: fix work with 0 bytes archives This change is in response to issue #1575 --- files/unarchive.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/files/unarchive.py b/files/unarchive.py index 8053991b63d..a3544253402 100644 --- a/files/unarchive.py +++ b/files/unarchive.py @@ -300,6 +300,16 @@ def main(): if not os.access(src, os.R_OK): module.fail_json(msg="Source '%s' not readable" % src) + # skip working with 0 size archives + try: + if os.path.getsize(src) == 0: + res_args = { + 'changed': False + } + module.exit_json(**res_args) + except Exception, e: + 
module.fail_json(msg="Source '%s' not readable" % src) + # is dest OK to receive tar file? if not os.path.isdir(dest): module.fail_json(msg="Destination '%s' is not a directory" % dest) From 304e187a52abd165afaf5c4dd88ac28b66bfc149 Mon Sep 17 00:00:00 2001 From: Davide Guerri Date: Fri, 26 Jun 2015 14:53:48 +0100 Subject: [PATCH 318/464] Fix reuse argument documentation --- cloud/openstack/os_floating_ip.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/cloud/openstack/os_floating_ip.py b/cloud/openstack/os_floating_ip.py index 9755b1d4159..5bd29240a67 100644 --- a/cloud/openstack/os_floating_ip.py +++ b/cloud/openstack/os_floating_ip.py @@ -81,19 +81,19 @@ requirements: ["shade"] EXAMPLES = ''' # Assign a floating IP to the fist interface of `cattle001` from an exiting -# external network or nova pool. If a free floating IP is already allocated to -# the project, it is reused; if not, a new one is created. +# external network or nova pool. A new floating IP from the first available +# external network is allocated to the project. - os_floating_ip: cloud: dguerri server: cattle001 # Assign a new floating IP to the instance fixed ip `192.0.2.3` of -# `cattle001`. A new floating IP from the external network (or nova pool) -# ext_net is created. +# `cattle001`. If a free floating IP is already allocated to the project, it is +# reused; if not, a new one is created. 
- os_floating_ip: cloud: dguerri state: present - reuse: false + reuse: yes server: cattle001 network: ext_net fixed_address: 192.0.2.3 From a81dea2b17428127b507888fd0c2fad59c1aca1e Mon Sep 17 00:00:00 2001 From: "Roetman, Victor" Date: Fri, 26 Jun 2015 14:50:29 -0400 Subject: [PATCH 319/464] apache2_module documetation update requires a2enmod and a2dismod --- web_infrastructure/apache2_module.py | 1 + 1 file changed, 1 insertion(+) diff --git a/web_infrastructure/apache2_module.py b/web_infrastructure/apache2_module.py index ec9a8985e60..cb43ba9b0eb 100644 --- a/web_infrastructure/apache2_module.py +++ b/web_infrastructure/apache2_module.py @@ -35,6 +35,7 @@ options: choices: ['present', 'absent'] default: present +requirements: ["a2enmod","a2dismod"] ''' EXAMPLES = ''' From d651b4169133ed8ef17d63d0418f733061fc1a6d Mon Sep 17 00:00:00 2001 From: Robb Wagoner Date: Fri, 26 Jun 2015 15:39:08 -0700 Subject: [PATCH 320/464] return health of instances and counts --- cloud/amazon/ec2_elb_lb.py | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_elb_lb.py b/cloud/amazon/ec2_elb_lb.py index 566db2d329a..9d626a98194 100644 --- a/cloud/amazon/ec2_elb_lb.py +++ b/cloud/amazon/ec2_elb_lb.py @@ -384,9 +384,33 @@ class ElbManager(object): 'hosted_zone_name': check_elb.canonical_hosted_zone_name, 'hosted_zone_id': check_elb.canonical_hosted_zone_name_id, 'lb_cookie_policy': lb_cookie_policy, - 'app_cookie_policy': app_cookie_policy + 'app_cookie_policy': app_cookie_policy, + 'instances': [instance.id for instance in check_elb.instances], + 'out_of_service_count': 0, + 'in_service_count': 0, + 'unknown_instance_state_count': 0 } + # status of instances behind the ELB + if info['instances']: + info['instance_health'] = [ dict({ + "instance_id": instance_state.instance_id, + "reason_code": instance_state.reason_code, + "state": instance_state.state, + }) for instance_state in 
self.elb_conn.describe_instance_health(self.name)] + else: + info['instance_health'] = [] + + # instance state counts: InService or OutOfService + if info['instance_health']: + for instance_state in info['instance_health']: + if instance_state['state'] == "InService": + info['in_service_count'] += 1 + elif instance_state['state'] == "OutOfService": + info['out_of_service_count'] += 1 + else: + info['unknown_instance_state_count'] =+ 1 + if check_elb.health_check: info['health_check'] = { 'target': check_elb.health_check.target, From 5e674ddcfc3ab317d2aa05fc84dfe206768cfdab Mon Sep 17 00:00:00 2001 From: Robb Wagoner Date: Thu, 11 Dec 2014 08:21:03 -0700 Subject: [PATCH 321/464] include all launch config properties in the return make all properties available when registering the result which is useful when wanting to launch a stand-alone instance based upon an existing Launch Config. --- cloud/amazon/ec2_lc.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_lc.py b/cloud/amazon/ec2_lc.py index 3c292377a58..592d179a02b 100644 --- a/cloud/amazon/ec2_lc.py +++ b/cloud/amazon/ec2_lc.py @@ -225,7 +225,8 @@ def create_launch_config(connection, module): module.exit_json(changed=changed, name=result.name, created_time=str(result.created_time), image_id=result.image_id, arn=result.launch_configuration_arn, - security_groups=result.security_groups, instance_type=instance_type) + security_groups=result.security_groups, instance_type=result.instance_type, + result=result) def delete_launch_config(connection, module): From 6f6d7f5c18296e3fe84fa5aef674948753ff52ae Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 27 Jun 2015 00:10:25 -0400 Subject: [PATCH 322/464] updated docs to clarify use of exclusive --- system/authorized_key.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/system/authorized_key.py b/system/authorized_key.py index bb223acbe4d..9d944a7d724 100644 --- a/system/authorized_key.py +++ 
b/system/authorized_key.py @@ -34,7 +34,6 @@ options: - The username on the remote host whose authorized_keys file will be modified required: true default: null - aliases: [] key: description: - The SSH public key(s), as a string or (since 1.9) url (https://github.com/username.keys) @@ -72,9 +71,11 @@ options: version_added: "1.4" exclusive: description: - - Whether to remove all other non-specified keys from the - authorized_keys file. Multiple keys can be specified in a single - key= string value by separating them by newlines. + - Whether to remove all other non-specified keys from the authorized_keys file. Multiple keys + can be specified in a single C(key) string value by separating them by newlines. + - This option is not loop aware, so if you use C(with_) , it will be exclusive per iteration + of the loop, if you want multiple keys in the file you need to pass them all to C(key) in a + single batch as mentioned above. required: false choices: [ "yes", "no" ] default: "no" From 2d3e93e55823d03891e1c6612e959ee785f17575 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 27 Jun 2015 00:36:55 -0400 Subject: [PATCH 323/464] added doc to note that git the command line tool is required for this moduel to function fixes http://github.com/ansible/ansible/issues/11358 --- source_control/git.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/source_control/git.py b/source_control/git.py index 369430211f3..bc35c97da93 100644 --- a/source_control/git.py +++ b/source_control/git.py @@ -173,7 +173,8 @@ options: to be installed. The commit MUST be signed and the public key MUST be trusted in the GPG trustdb. - +requirements: + - git (the command line tool) notes: - "If the task seems to be hanging, first verify remote host is in C(known_hosts). SSH will prompt user to authorize the first contact with a remote host. 
To avoid this prompt, From 5e82f7e11e0ac7cc7cdaeffb0787209afae79fb0 Mon Sep 17 00:00:00 2001 From: ToBeReplaced Date: Sat, 27 Jun 2015 11:41:01 -0600 Subject: [PATCH 324/464] Make ALL_IN_SCHEMA for tables affect views ALL TABLES is considered to include views, so we must check for reltypes 'r' and 'v', not just 'r'. This bug was introduced due to using a custom, backwards-compatible version of "ALL TABLES IN SCHEMA". --- database/postgresql/postgresql_privs.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/database/postgresql/postgresql_privs.py b/database/postgresql/postgresql_privs.py index 10f2361bfb2..8fefd3de648 100644 --- a/database/postgresql/postgresql_privs.py +++ b/database/postgresql/postgresql_privs.py @@ -315,7 +315,7 @@ class Connection(object): query = """SELECT relname FROM pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace - WHERE nspname = %s AND relkind = 'r'""" + WHERE nspname = %s AND relkind in ('r', 'v')""" self.cursor.execute(query, (schema,)) return [t[0] for t in self.cursor.fetchall()] From e80073ff8761fd48a1bb4a5cf8dd6970bbcf5084 Mon Sep 17 00:00:00 2001 From: Tal Auslander Date: Sun, 28 Jun 2015 13:45:48 +0300 Subject: [PATCH 325/464] stop reading from url on error --- windows/win_get_url.ps1 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/windows/win_get_url.ps1 b/windows/win_get_url.ps1 index e5e1ea73c83..23463b681c0 100644 --- a/windows/win_get_url.ps1 +++ b/windows/win_get_url.ps1 @@ -62,7 +62,7 @@ Else { $stream = New-Object System.IO.StreamReader($response.GetResponseStream()) - $stream.ReadToEnd() | Set-Content -Path $dest -Force + $stream.ReadToEnd() | Set-Content -Path $dest -Force -ErrorAction Stop $result.changed = $true } From 786ec1dca156323a2ce83ba3bbe507d85ba22840 Mon Sep 17 00:00:00 2001 From: whiter Date: Sat, 27 Jun 2015 21:54:19 +1000 Subject: [PATCH 326/464] ec2_vpc_net refactor --- cloud/amazon/ec2_vpc_net.py | 324 +++++++++++++++--------------------- 
1 file changed, 137 insertions(+), 187 deletions(-) diff --git a/cloud/amazon/ec2_vpc_net.py b/cloud/amazon/ec2_vpc_net.py index 33c711e7683..41186ed0ab2 100644 --- a/cloud/amazon/ec2_vpc_net.py +++ b/cloud/amazon/ec2_vpc_net.py @@ -17,10 +17,11 @@ DOCUMENTATION = ''' --- module: ec2_vpc_net -short_description: configure AWS virtual private clouds +short_description: Configure AWS virtual private clouds description: - - Create or terminates AWS virtual private clouds. This module has a dependency on python-boto. + - Create or terminate AWS virtual private clouds. This module has a dependency on python-boto. version_added: "2.0" +author: Jonathan Davila (@defionscode) options: name: description: @@ -30,23 +31,25 @@ options: description: - The CIDR of the VPC required: yes - aliases: [] tenancy: description: - Whether to be default or dedicated tenancy. This cannot be changed after the VPC has been created. required: false default: default + choices: [ 'default', 'dedicated' ] dns_support: description: - Whether to enable AWS DNS support. required: false - default: true + default: yes + choices: [ 'yes', 'no' ] dns_hostnames: description: - Whether to enable AWS hostname support. required: false - default: true - dhcp_id: + default: yes + choices: [ 'yes', 'no' ] + dhcp_opts_id: description: - the id of the DHCP options to use for this vpc default: null @@ -61,30 +64,32 @@ options: - The state of the VPC. Either absent or present. default: present required: false + choices: [ 'present', 'absent' ] multi_ok: description: - By default the module will not create another VPC if there is another VPC with the same name and CIDR block. Specify this as true if you want duplicate VPCs created. default: false required: false -author: Jonathan Davila + extends_documentation_fragment: aws ''' EXAMPLES = ''' +# Note: These examples do not set authentication details, see the AWS Guide for details. 
+ # Create a VPC with dedicate tenancy and a couple of tags -- ec2_vpc: +- ec2_vpc_net: name: Module_dev2 - cidr_block: 170.10.0.0/16 + cidr_block: 10.10.0.0/16 region: us-east-1 tags: - new_vpc: ec2_vpc_module - this: works22 + module: ec2_vpc_net + this: works tenancy: dedicated ''' - import time import sys @@ -92,8 +97,7 @@ try: import boto import boto.ec2 import boto.vpc - from boto.exception import EC2ResponseError - + from boto.exception import BotoServerError HAS_BOTO=True except ImportError: HAS_BOTO=False @@ -110,12 +114,11 @@ def boto_exception(err): return error def vpc_exists(module, vpc, name, cidr_block, multi): - """Returns True or False in regards to the existance of a VPC. When supplied + """Returns True or False in regards to the existence of a VPC. When supplied with a CIDR, it will check for matching tags to determine if it is a match otherwise it will assume the VPC does not exist and thus return false. """ - exists=False - matched_vpc=None + matched_vpc = None try: matching_vpcs=vpc.get_all_vpcs(filters={'tag:Name' : name, 'cidr-block' : cidr_block}) @@ -123,114 +126,69 @@ def vpc_exists(module, vpc, name, cidr_block, multi): e_msg=boto_exception(e) module.fail_json(msg=e_msg) - if len(matching_vpcs) == 1 and not multi: - exists=True - matched_vpc=str(matching_vpcs).split(':')[1].split(']')[0] - elif len(matching_vpcs) > 1 and not multi: - module.fail_json(msg='Currently there are %d VPCs that have the same name and ' + if len(matching_vpcs) == 1: + matched_vpc = matching_vpcs[0] + elif len(matching_vpcs) > 1: + if multi: + module.fail_json(msg='Currently there are %d VPCs that have the same name and ' 'CIDR block you specified. If you would like to create ' - 'the VPC anyways please pass True to the multi_ok param.' % len(matching_vpcs)) - - return exists, matched_vpc - -def vpc_needs_update(module, vpc, vpc_id, dns_support, dns_hostnames, dhcp_id, tags): - """This returns True or False. Intended to run after vpc_exists. 
- It will check all the characteristics of the parameters passed and compare them - to the active VPC. If any discrepancy is found, it will report true, meaning that - the VPC needs to be update in order to match the specified state in the params. - """ - - update_dhcp=False - update_tags=False - dhcp_match=False - - try: - dhcp_list=vpc.get_all_dhcp_options() - - if dhcp_id is not None: - has_default=vpc.get_all_vpcs(filters={'dhcp-options-id' : 'default', 'vpc-id' : vpc_id}) - for opts in dhcp_list: - if (str(opts).split(':')[1] == dhcp_id) or has_default: - dhcp_match=True - break - else: - pass - except Exception, e: - e_msg=boto_exception(e) - module.fail_json(msg=e_msg) - - if not dhcp_match or (has_default and dhcp_id != 'default'): - update_dhcp=True + 'the VPC anyway please pass True to the multi_ok param.' % len(matching_vpcs)) + + return matched_vpc - if dns_hostnames and dns_support == False: - module.fail_json('In order to enable DNS Hostnames you must have DNS support enabled') - else: - - # Note: Boto currently doesn't currently provide an interface to ec2-describe-vpc-attribute - # which is needed in order to detect the current status of DNS options. For now we just update - # the attribute each time and is not used as a changed-factor. 
- try: - vpc.modify_vpc_attribute(vpc_id, enable_dns_support=dns_support) - vpc.modify_vpc_attribute(vpc_id, enable_dns_hostnames=dns_hostnames) - except Exception, e: - e_msg=boto_exception(e) - module.fail_json(msg=e_msg) - if tags: - try: - current_tags = dict((t.name, t.value) for t in vpc.get_all_tags(filters={'resource-id': vpc_id})) - if not set(tags.items()).issubset(set(current_tags.items())): - update_tags=True - except Exception, e: - e_msg=boto_exception(e) - module.fail_json(msg=e_msg) - - return update_dhcp, update_tags - - -def update_vpc_tags(module, vpc, vpc_id, tags, name): +def update_vpc_tags(vpc, module, vpc_obj, tags, name): + + if tags is None: + tags = dict() + tags.update({'Name': name}) try: - vpc.create_tags(vpc_id, tags) - updated_tags=dict((t.name, t.value) for t in vpc.get_all_tags(filters={'resource-id': vpc_id})) - except Exception, e: - e_msg=boto_exception(e) - module.fail_json(msg=e_msg) - - return updated_tags - - -def update_dhcp_opts(module, vpc, vpc_id, dhcp_id): - try: - vpc.associate_dhcp_options(dhcp_id, vpc_id) - dhcp_list=vpc.get_all_dhcp_options() + current_tags = dict((t.name, t.value) for t in vpc.get_all_tags(filters={'resource-id': vpc_obj.id})) + if sorted(current_tags) != sorted(tags): + vpc.create_tags(vpc_obj.id, tags) + return True + else: + return False except Exception, e: e_msg=boto_exception(e) module.fail_json(msg=e_msg) + - for opts in dhcp_list: - vpc_dhcp=vpc.get_all_vpcs(filters={'dhcp-options-id' : opts, 'vpc-id' : vpc_id}) - matched=False - if opts == dhcp_id: - matched=True - return opts - - if matched == False: - return dhcp_id +def update_dhcp_opts(connection, module, vpc_obj, dhcp_id): + + if vpc_obj.dhcp_options_id != dhcp_id: + connection.associate_dhcp_options(dhcp_id, vpc_obj.id) + return True + else: + return False + +def get_vpc_values(vpc_obj): + + if vpc_obj is not None: + vpc_values = vpc_obj.__dict__ + if "region" in vpc_values: + vpc_values.pop("region") + if "item" in vpc_values: + 
vpc_values.pop("item") + if "connection" in vpc_values: + vpc_values.pop("connection") + return vpc_values + else: + return None def main(): argument_spec=ec2_argument_spec() argument_spec.update(dict( - name=dict(type='str', default=None, required=True), - cidr_block=dict(type='str', default=None, required=True), - tenancy=dict(choices=['default', 'dedicated'], default='default'), - dns_support=dict(type='bool', default=True), - dns_hostnames=dict(type='bool', default=True), - dhcp_opts_id=dict(type='str', default=None, required=False), - tags=dict(type='dict', required=False, default=None), - state=dict(choices=['present', 'absent'], default='present'), - region=dict(type='str', required=True), - multi_ok=dict(type='bool', default=False) + name = dict(type='str', default=None, required=True), + cidr_block = dict(type='str', default=None, required=True), + tenancy = dict(choices=['default', 'dedicated'], default='default'), + dns_support = dict(type='bool', default=True), + dns_hostnames = dict(type='bool', default=True), + dhcp_opts_id = dict(type='str', default=None, required=False), + tags = dict(type='dict', required=False, default=None), + state = dict(choices=['present', 'absent'], default='present'), + multi_ok = dict(type='bool', default=False) ) ) @@ -239,7 +197,7 @@ def main(): ) if not HAS_BOTO: - module.fail_json(msg='Boto is required for this module') + module.fail_json(msg='boto is required for this module') name=module.params.get('name') cidr_block=module.params.get('cidr_block') @@ -250,93 +208,85 @@ def main(): tags=module.params.get('tags') state=module.params.get('state') multi=module.params.get('multi_ok') - + changed=False - new_dhcp_opts=None - new_tags=None - update_dhcp=False - update_tags=False - region, ec2_url, aws_connect_kwargs=get_aws_connection_info(module) - - try: - vpc=boto.vpc.connect_to_region( - region, - **aws_connect_kwargs - ) - except boto.exception.NoAuthHandlerFound, e: - module.fail_json(msg=str(e)) - - already_exists, 
vpc_id=vpc_exists(module, vpc, name, cidr_block, multi) - - if already_exists: - update_dhcp, update_tags=vpc_needs_update(module, vpc, vpc_id, dns_support, dns_hostnames, dhcp_id, tags) - if update_dhcp or update_tags: - changed=True + region, ec2_url, aws_connect_params = get_aws_connection_info(module) + + if region: + try: + connection = connect_to_aws(boto.vpc, region, **aws_connect_params) + except (boto.exception.NoAuthHandlerFound, StandardError), e: + module.fail_json(msg=str(e)) + else: + module.fail_json(msg="region must be specified") + + if dns_hostnames and not dns_support: + module.fail_json('In order to enable DNS Hostnames you must also enable DNS support') + if state == 'present': + + # Check if VPC exists + vpc_obj = vpc_exists(module, connection, name, cidr_block, multi) + + if vpc_obj is None: + try: + vpc_obj = connection.create_vpc(cidr_block, instance_tenancy=tenancy) + changed = True + except BotoServerError, e: + module.fail_json(msg=e) + + if dhcp_id is not None: + try: + if update_dhcp_opts(connection, module, vpc_obj, dhcp_id): + changed = True + except BotoServerError, e: + module.fail_json(msg=e) + + if tags is not None or name is not None: + try: + if update_vpc_tags(connection, module, vpc_obj, tags, name): + changed = True + except BotoServerError, e: + module.fail_json(msg=e) + + + # Note: Boto currently doesn't currently provide an interface to ec2-describe-vpc-attribute + # which is needed in order to detect the current status of DNS options. For now we just update + # the attribute each time and is not used as a changed-factor. 
try: - e_tags=dict((t.name, t.value) for t in vpc.get_all_tags(filters={'resource-id': vpc_id})) - dhcp_list=vpc.get_all_dhcp_options() - has_default=vpc.get_all_vpcs(filters={'dhcp-options-id' : 'default', 'vpc-id' : vpc_id}) - except Exception, e: + connection.modify_vpc_attribute(vpc_obj.id, enable_dns_support=dns_support) + connection.modify_vpc_attribute(vpc_obj.id, enable_dns_hostnames=dns_hostnames) + except BotoServerError, e: e_msg=boto_exception(e) module.fail_json(msg=e_msg) - - dhcp_opts=None - + + # get the vpc obj again in case it has changed try: - for opts in dhcp_list: - if vpc.get_all_vpcs(filters={'dhcp-options-id' : opts, 'vpc-id' : vpc_id}): - dhcp_opts=opts - break - else: - pass - except Exception, e: + vpc_obj = connection.get_all_vpcs(vpc_obj.id)[0] + except BotoServerError, e: e_msg=boto_exception(e) module.fail_json(msg=e_msg) + + module.exit_json(changed=changed, vpc=get_vpc_values(vpc_obj)) - if not dhcp_opts and has_default: - dhcp_opts='default' - - if state == 'present': - - if not changed and already_exists: - module.exit_json(changed=changed, vpc_id=vpc_id) - elif changed: - if update_dhcp: - dhcp_opts=update_dhcp_opts(module, vpc, vpc_id, dhcp_id) - if update_tags: - e_tags=update_vpc_tags(module, vpc, vpc_id, tags, name) - - module.exit_json(changed=changed, name=name, dhcp_options_id=dhcp_opts, tags=e_tags) - - if not already_exists: - try: - vpc_id=str(vpc.create_vpc(cidr_block, instance_tenancy=tenancy)).split(':')[1] - vpc.create_tags(vpc_id, dict(Name=name)) - except Exception, e: - e_msg=boto_exception(e) - module.fail_json(msg=e_msg) - - update_dhcp, update_tags=vpc_needs_update(module, vpc, vpc_id, dns_support, dns_hostnames, dhcp_id, tags) - - if update_dhcp: - new_dhcp_opts=update_dhcp_opts(module, vpc, vpc_id, dhcp_id) - if update_tags: - new_tags=update_vpc_tags(module, vpc, vpc_id, tags, name) - module.exit_json(changed=True, name=name, vpc_id=vpc_id, dhcp_options=new_dhcp_opts, tags=new_tags) elif state == 'absent': 
- if already_exists: - changed=True + + # Check if VPC exists + vpc_obj = vpc_exists(module, connection, name, cidr_block, multi) + + if vpc_obj is not None: try: - vpc.delete_vpc(vpc_id) - module.exit_json(changed=changed, vpc_id=vpc_id) - except Exception, e: - e_msg=boto_exception(e) + connection.delete_vpc(vpc_obj.id) + vpc_obj = None + changed = True + except BotoServerError, e: + e_msg = boto_exception(e) module.fail_json(msg="%s. You may want to use the ec2_vpc_subnet, ec2_vpc_igw, " - "and/or ec2_vpc_rt modules to ensure the other components are absent." % e_msg) - else: - module.exit_json(msg="VPC is absent") + "and/or ec2_vpc_route_table modules to ensure the other components are absent." % e_msg) + + module.exit_json(changed=changed, vpc=get_vpc_values(vpc_obj)) + # import module snippets from ansible.module_utils.basic import * from ansible.module_utils.ec2 import * From 7b0b75ceedf526826ebf591709afea4c8fdde7bb Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Mon, 29 Jun 2015 10:34:24 -0400 Subject: [PATCH 327/464] Fix dict syntax typo --- cloud/openstack/os_client_config.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/openstack/os_client_config.py b/cloud/openstack/os_client_config.py index a12cd8fe65a..2c4af5c8c08 100644 --- a/cloud/openstack/os_client_config.py +++ b/cloud/openstack/os_client_config.py @@ -52,9 +52,9 @@ EXAMPLES = ''' def main(): - module = AnsibleModule({ + module = AnsibleModule(argument_spec=dict( clouds=dict(required=False, default=[]), - }) + )) p = module.params try: From 73390f8ecc4df506a04a0406a42ecbea7d57501b Mon Sep 17 00:00:00 2001 From: James Meickle Date: Mon, 29 Jun 2015 13:23:03 -0400 Subject: [PATCH 328/464] Change uri debug example --- network/basics/uri.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/basics/uri.py b/network/basics/uri.py index 3de17c12d60..8095eaffe67 100644 --- a/network/basics/uri.py +++ b/network/basics/uri.py @@ -269,7 +269,7 @@ def 
url_filename(url): def uri(module, url, dest, user, password, body, body_format, method, headers, redirects, socket_timeout, validate_certs): # To debug - #httplib2.debug = 4 + #httplib2.debuglevel = 4 # Handle Redirects if redirects == "all" or redirects == "yes": From 692045f693665f810736d0e07782e62bb4fb1f2d Mon Sep 17 00:00:00 2001 From: Scott Miller Date: Mon, 29 Jun 2015 14:15:23 -0400 Subject: [PATCH 329/464] update docs for cloudformation --- cloud/amazon/cloudformation.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/cloud/amazon/cloudformation.py b/cloud/amazon/cloudformation.py index dee292aeba3..cccdd156f20 100644 --- a/cloud/amazon/cloudformation.py +++ b/cloud/amazon/cloudformation.py @@ -51,6 +51,7 @@ options: template: description: - The local path of the cloudformation template. This parameter is mutually exclusive with 'template_url'. Either one of them is required if "state" parameter is "present" + Must give full path to the file, relative to the playbook. If using roles this may look like "roles/cloudformation/files/cloudformation-example.json" required: false default: null aliases: [] @@ -115,6 +116,22 @@ EXAMPLES = ''' tags: Stack: "ansible-cloudformation" +# Basic role example +- name: launch ansible cloudformation example + cloudformation: + stack_name: "ansible-cloudformation" + state: "present" + region: "us-east-1" + disable_rollback: true + template: "roles/cloudformation/files/cloudformation-example.json" + template_parameters: + KeyName: "jmartin" + DiskType: "ephemeral" + InstanceType: "m1.small" + ClusterSize: 3 + tags: + Stack: "ansible-cloudformation" + # Removal example - name: tear down old deployment cloudformation: From e6fc129013b0dfd2873fad648a867cc87dc76cc6 Mon Sep 17 00:00:00 2001 From: David Shrewsbury Date: Mon, 29 Jun 2015 14:49:13 -0400 Subject: [PATCH 330/464] Add a note about the return value. 
--- cloud/openstack/os_keypair.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cloud/openstack/os_keypair.py b/cloud/openstack/os_keypair.py index a9c2640628f..f485d7fd2fc 100644 --- a/cloud/openstack/os_keypair.py +++ b/cloud/openstack/os_keypair.py @@ -33,6 +33,10 @@ extends_documentation_fragment: openstack version_added: "2.0" description: - Add or Remove key pair from OpenStack +notes: + - The module returns a dictionary describing the keypair, with + keys including: id, name, public_key. A private_key entry may + also be included if a keypair was generated for you. options: name: description: From def5fdcb2123b8a0146fe8b94bf19f82db3248a5 Mon Sep 17 00:00:00 2001 From: Jonathan Davila Date: Mon, 29 Jun 2015 15:14:50 -0400 Subject: [PATCH 331/464] no_log to iam password --- cloud/amazon/iam.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/iam.py b/cloud/amazon/iam.py index bda953faab4..df8f3423411 100644 --- a/cloud/amazon/iam.py +++ b/cloud/amazon/iam.py @@ -509,7 +509,7 @@ def main(): groups=dict(type='list', default=None, required=False), state=dict( default=None, required=True, choices=['present', 'absent', 'update']), - password=dict(default=None, required=False), + password=dict(default=None, required=False, no_log=True), update_password=dict(default='always', required=False, choices=['always', 'on_create']), access_key_state=dict(default=None, required=False, choices=[ 'active', 'inactive', 'create', 'remove', From 5da9c6a1c77d40b4d52ac3ff9799e5bcb0ab3847 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 29 Jun 2015 12:42:50 -0700 Subject: [PATCH 332/464] Add testing of docs to the core repo --- .travis.yml | 1 + test-docs.sh | 21 +++++++++++++++++++++ 2 files changed, 22 insertions(+) create mode 100755 test-docs.sh diff --git a/.travis.yml b/.travis.yml index 0e3a2af23b3..9a65ec487d3 100644 --- a/.travis.yml +++ b/.travis.yml @@ -14,3 +14,4 @@ script: - python2.4 -m compileall -fq 
cloud/amazon/_ec2_ami_search.py cloud/amazon/ec2_facts.py - python2.6 -m compileall -fq . - python2.7 -m compileall -fq . + - ./test-docs.sh core diff --git a/test-docs.sh b/test-docs.sh new file mode 100755 index 00000000000..76297fbada6 --- /dev/null +++ b/test-docs.sh @@ -0,0 +1,21 @@ +#!/bin/sh +set -x + +CHECKOUT_DIR=".ansible-checkout" +MOD_REPO="$1" + +# Hidden file to avoid the module_formatter recursing into the checkout +git clone https://github.com/ansible/ansible "$CHECKOUT_DIR" +cd "$CHECKOUT_DIR" +git submodule update --init +rm -rf "lib/ansible/modules/$MOD_REPO" +ln -s "$TRAVIS_BUILD_DIR/" "lib/ansible/modules/$MOD_REPO" + +pip install -U Jinja2 PyYAML setuptools six pycrypto sphinx + +. ./hacking/env-setup +PAGER=/bin/cat bin/ansible-doc -l +if [ $? -ne 0 ] ; then + exit $? +fi +make -C docsite From 7970924bd56e2bbd53f6588b023ca3497afc6ebb Mon Sep 17 00:00:00 2001 From: David Shrewsbury Date: Mon, 29 Jun 2015 15:55:15 -0400 Subject: [PATCH 333/464] Use newest documentation style for return value. --- cloud/openstack/os_keypair.py | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/cloud/openstack/os_keypair.py b/cloud/openstack/os_keypair.py index f485d7fd2fc..7a0c1ca47a0 100644 --- a/cloud/openstack/os_keypair.py +++ b/cloud/openstack/os_keypair.py @@ -33,10 +33,6 @@ extends_documentation_fragment: openstack version_added: "2.0" description: - Add or Remove key pair from OpenStack -notes: - - The module returns a dictionary describing the keypair, with - keys including: id, name, public_key. A private_key entry may - also be included if a keypair was generated for you. options: name: description: @@ -78,6 +74,26 @@ EXAMPLES = ''' name: ansible_key ''' +RETURN = ''' +id: + description: Unique UUID. + returned: success + type: string +name: + description: Name given to the keypair. + returned: success + type: string +public_key: + description: The public key value for the keypair. 
+ returned: success + type: string +private_key: + description: The private key value for the keypair. + returned: Only when a keypair is generated for the user (e.g., when creating one + and a public key is not specified). + type: string +''' + def _system_state_change(module, keypair): state = module.params['state'] From 7edacf6b1c480099eabd6f9ad9ad21d056ac4053 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 29 Jun 2015 13:20:15 -0700 Subject: [PATCH 334/464] Use module.fail_json() instead of sys.exit() --- cloud/amazon/iam_policy.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/cloud/amazon/iam_policy.py b/cloud/amazon/iam_policy.py index f1a6abdd0a6..26d65450ec9 100644 --- a/cloud/amazon/iam_policy.py +++ b/cloud/amazon/iam_policy.py @@ -112,13 +112,12 @@ task: ''' import json import urllib -import sys try: import boto import boto.iam + HAS_BOTO = True except ImportError: - print "failed=True msg='boto required for this module'" - sys.exit(1) + HAS_BOTO = False def boto_exception(err): '''generic error message handler''' @@ -278,6 +277,9 @@ def main(): argument_spec=argument_spec, ) + if not HAS_BOTO: + module.fail_json(msg='boto required for this module') + state = module.params.get('state').lower() iam_type = module.params.get('iam_type').lower() state = module.params.get('state') From ddc78c82a4db6e8ee8c377fc08178e16fafdbbf0 Mon Sep 17 00:00:00 2001 From: Jesse Keating Date: Mon, 29 Jun 2015 14:06:50 -0700 Subject: [PATCH 335/464] Document auto_floating_ip argument --- cloud/openstack/os_server.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/cloud/openstack/os_server.py b/cloud/openstack/os_server.py index 78a46f78c04..959f39880f8 100644 --- a/cloud/openstack/os_server.py +++ b/cloud/openstack/os_server.py @@ -90,6 +90,11 @@ options: - Ensure instance has public ip however the cloud wants to do that required: false default: 'yes' + auto_floating_ip: + description: + - If the module should automatically assign a 
floating IP + required: false + default: 'yes' floating_ips: description: - list of valid floating IPs that pre-exist to assign to this node From 4da3a724f1d57f5e1fe7f29804d82d835cceb3a5 Mon Sep 17 00:00:00 2001 From: Michael Weinrich Date: Sun, 21 Jun 2015 23:51:14 +0200 Subject: [PATCH 336/464] Fix connection creation to allow usage of profiles with boto --- cloud/amazon/iam_policy.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/cloud/amazon/iam_policy.py b/cloud/amazon/iam_policy.py index 26d65450ec9..72e70221d29 100644 --- a/cloud/amazon/iam_policy.py +++ b/cloud/amazon/iam_policy.py @@ -115,6 +115,7 @@ import urllib try: import boto import boto.iam + import boto.ec2 HAS_BOTO = True except ImportError: HAS_BOTO = False @@ -293,13 +294,10 @@ def main(): else: pdoc=None - ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module) + region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module) try: - iam = boto.iam.connection.IAMConnection( - aws_access_key_id=aws_access_key, - aws_secret_access_key=aws_secret_key, - ) + iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs) except boto.exception.NoAuthHandlerFound, e: module.fail_json(msg=str(e)) From 02ea210db9f60ab68b5ae4e18f7150f3e5993954 Mon Sep 17 00:00:00 2001 From: Andreas Reischuck Date: Sat, 27 Jun 2015 23:34:16 +0200 Subject: [PATCH 337/464] fixed win_file state=touch --- windows/win_file.ps1 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/windows/win_file.ps1 b/windows/win_file.ps1 index 0f3c20ec8e3..f8416120abf 100644 --- a/windows/win_file.ps1 +++ b/windows/win_file.ps1 @@ -56,7 +56,7 @@ If ( $state -eq "touch" ) } Else { - echo $null > $file + echo $null > $path } $result.changed = $TRUE } From 4ef5a45347558349f0fa23e138bf18559dd9a672 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 30 Jun 2015 05:08:38 -0700 Subject: [PATCH 338/464] Add version that the profilename param was added --- packaging/os/rhn_register.py | 
1 + 1 file changed, 1 insertion(+) diff --git a/packaging/os/rhn_register.py b/packaging/os/rhn_register.py index 4207acc8c28..b67b442aa22 100644 --- a/packaging/os/rhn_register.py +++ b/packaging/os/rhn_register.py @@ -61,6 +61,7 @@ options: - supply an profilename for use with registration required: False default: null + version_added: "2.0" channels: description: - Optionally specify a list of comma-separated channels to subscribe to upon successful registration. From 8deee99fcc72852e7275746c2793976790881d50 Mon Sep 17 00:00:00 2001 From: verm666 Date: Tue, 30 Jun 2015 08:14:30 -0700 Subject: [PATCH 339/464] unarchive: fix @bcoca's remarks, issue #1575 --- files/unarchive.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/files/unarchive.py b/files/unarchive.py index a3544253402..3ee83de0dcd 100644 --- a/files/unarchive.py +++ b/files/unarchive.py @@ -303,10 +303,7 @@ def main(): # skip working with 0 size archives try: if os.path.getsize(src) == 0: - res_args = { - 'changed': False - } - module.exit_json(**res_args) + module.fail_json(msg="Invalid archive '%s', the file is 0 bytes" % src) except Exception, e: module.fail_json(msg="Source '%s' not readable" % src) From edad5c80ffc49706d44c98ee449c436b352a8817 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 30 Jun 2015 13:18:56 -0700 Subject: [PATCH 340/464] Few minor things from review of the pull request --- cloud/openstack/os_keypair.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/cloud/openstack/os_keypair.py b/cloud/openstack/os_keypair.py index 7a0c1ca47a0..73656883a76 100644 --- a/cloud/openstack/os_keypair.py +++ b/cloud/openstack/os_keypair.py @@ -56,7 +56,7 @@ options: - Should the resource be present or absent. 
choices: [present, absent] default: present -requirements: ["shade"] +requirements: [] ''' EXAMPLES = ''' @@ -163,4 +163,5 @@ def main(): # this is magic, see lib/ansible/module_common.py from ansible.module_utils.basic import * from ansible.module_utils.openstack import * -main() +if __name__ == '__main__': + main() From 02b6df3160e66f92ef0e0cea363bce9472ce94b5 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 1 Jul 2015 05:00:08 -0700 Subject: [PATCH 341/464] Fix indentation levels in os_keypair --- cloud/openstack/os_keypair.py | 42 +++++++++++++++++------------------ 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/cloud/openstack/os_keypair.py b/cloud/openstack/os_keypair.py index 73656883a76..f62cc51bf64 100644 --- a/cloud/openstack/os_keypair.py +++ b/cloud/openstack/os_keypair.py @@ -32,30 +32,30 @@ short_description: Add/Delete a keypair from OpenStack extends_documentation_fragment: openstack version_added: "2.0" description: - - Add or Remove key pair from OpenStack + - Add or Remove key pair from OpenStack options: - name: - description: - - Name that has to be given to the key pair - required: true - default: None - public_key: - description: - - The public key that would be uploaded to nova and injected into VMs - upon creation. - required: false - default: None - public_key_file: - description: - - Path to local file containing ssh public key. Mutually exclusive - with public_key. + name: + description: + - Name that has to be given to the key pair + required: true + default: None + public_key: + description: + - The public key that would be uploaded to nova and injected into VMs + upon creation. + required: false + default: None + public_key_file: + description: + - Path to local file containing ssh public key. Mutually exclusive + with public_key. required: false default: None - state: - description: - - Should the resource be present or absent. 
- choices: [present, absent] - default: present + state: + description: + - Should the resource be present or absent. + choices: [present, absent] + default: present requirements: [] ''' From b00b3f2b3c57cae8131fb15abbd0ddb0f3515cfb Mon Sep 17 00:00:00 2001 From: verm666 Date: Wed, 1 Jul 2015 07:04:45 -0700 Subject: [PATCH 342/464] fix authorized_keys in check_mode This change is in response to issue #1515. Original pull request #1580. The original problem is: in authorized_key module you have no idea about users which will be created by Ansible at first run. I can propose next two ways to solve this problem: 1. Combine modules system/user.py and system/authorized_key.py in one module (so you will know everything about users in that module) 2. Use small workaround: add my commit and always provide 'path' parameter for authorized_key module during runs with --check option. --- system/authorized_key.py | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/system/authorized_key.py b/system/authorized_key.py index bb223acbe4d..e52b4e7556a 100644 --- a/system/authorized_key.py +++ b/system/authorized_key.py @@ -138,7 +138,7 @@ import shlex class keydict(dict): """ a dictionary that maintains the order of keys as they are added """ - + # http://stackoverflow.com/questions/2328235/pythonextend-the-dict-class def __init__(self, *args, **kw): @@ -146,7 +146,7 @@ class keydict(dict): self.itemlist = super(keydict,self).keys() def __setitem__(self, key, value): self.itemlist.append(key) - super(keydict,self).__setitem__(key, value) + super(keydict,self).__setitem__(key, value) def __iter__(self): return iter(self.itemlist) def keys(self): @@ -154,7 +154,7 @@ class keydict(dict): def values(self): return [self[key] for key in self] def itervalues(self): - return (self[key] for key in self) + return (self[key] for key in self) def keyfile(module, user, write=False, path=None, manage_dir=True): """ @@ -168,6 +168,13 @@ def keyfile(module, user, 
write=False, path=None, manage_dir=True): :return: full path string to authorized_keys for user """ + if module.check_mode: + if path is None: + module.fail_json(msg="You must provide full path to key file in check mode") + else: + keysfile = path + return keysfile + try: user_entry = pwd.getpwnam(user) except KeyError, e: @@ -214,8 +221,8 @@ def keyfile(module, user, write=False, path=None, manage_dir=True): return keysfile def parseoptions(module, options): - ''' - reads a string containing ssh-key options + ''' + reads a string containing ssh-key options and returns a dictionary of those options ''' options_dict = keydict() #ordered dict @@ -246,7 +253,7 @@ def parsekey(module, raw_key): 'ssh-ed25519', 'ecdsa-sha2-nistp256', 'ecdsa-sha2-nistp384', - 'ecdsa-sha2-nistp521', + 'ecdsa-sha2-nistp521', 'ssh-dss', 'ssh-rsa', ] From 910728f6c3b49de97df9af2abc730ff589230754 Mon Sep 17 00:00:00 2001 From: Matthew Gilliard Date: Wed, 1 Jul 2015 12:07:27 +0100 Subject: [PATCH 343/464] Handle race condition in directory creation. If we try to make a directory, but someone else creates the directory at the same time as us, we don't need to raise that error to the user. They asked for the directory to exist, and now it does. This fixes the race condition which was causing that error to be raised, and closes #1648. --- files/file.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/files/file.py b/files/file.py index 55d3665028e..ba5afd6809f 100644 --- a/files/file.py +++ b/files/file.py @@ -18,6 +18,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +import errno import shutil import stat import grp @@ -280,7 +281,13 @@ def main(): if not os.path.isabs(path): curpath = curpath.lstrip('/') if not os.path.exists(curpath): - os.mkdir(curpath) + try: + os.mkdir(curpath) + except OSError, ex: + # Possibly something else created the dir since the os.path.exists + # check above. 
As long as it's a dir, we don't need to error out. + if not (ex.errno == errno.EEXISTS and os.isdir(curpath)): + raise tmp_file_args = file_args.copy() tmp_file_args['path']=curpath changed = module.set_fs_attributes_if_different(tmp_file_args, changed) From 1b21e37fcbc135608b602bcc011bbcaeabd59ca3 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 1 Jul 2015 07:24:39 -0700 Subject: [PATCH 344/464] Disable travis docs checks --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 9a65ec487d3..91d1b9585d7 100644 --- a/.travis.yml +++ b/.travis.yml @@ -14,4 +14,4 @@ script: - python2.4 -m compileall -fq cloud/amazon/_ec2_ami_search.py cloud/amazon/ec2_facts.py - python2.6 -m compileall -fq . - python2.7 -m compileall -fq . - - ./test-docs.sh core + #- ./test-docs.sh core From 5a254e6303b82f8fe73e6ab7b1579ac0c8e36e14 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Tue, 30 Jun 2015 16:22:50 -0500 Subject: [PATCH 345/464] Replace tabbed indentation with spaces for mysql_db module --- database/mysql/mysql_db.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/database/mysql/mysql_db.py b/database/mysql/mysql_db.py index e9a530811d4..c018ad143db 100644 --- a/database/mysql/mysql_db.py +++ b/database/mysql/mysql_db.py @@ -326,7 +326,7 @@ def main(): if state in ['dump','import']: if target is None: module.fail_json(msg="with state=%s target is required" % (state)) - if db == 'all': + if db == 'all': connect_to_db = 'mysql' db = 'mysql' all_databases = True From 9eb4219f79446c2302e346f6e4464ea2ead8626e Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Tue, 30 Jun 2015 16:23:28 -0500 Subject: [PATCH 346/464] Replaced tabbed indentation with spaces for apt module --- packaging/os/apt.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packaging/os/apt.py b/packaging/os/apt.py index 09129a73fa5..9172c69763d 100644 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -230,10 
+230,10 @@ def package_status(m, pkgname, version, cache, state): try: provided_packages = cache.get_providing_packages(pkgname) if provided_packages: - is_installed = False + is_installed = False # when virtual package providing only one package, look up status of target package if cache.is_virtual_package(pkgname) and len(provided_packages) == 1: - package = provided_packages[0] + package = provided_packages[0] installed, upgradable, has_files = package_status(m, package.name, version, cache, state='install') if installed: is_installed = True From 4f43c4c09cf717b2cb0b59041f3e2da21cedf1a9 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Tue, 30 Jun 2015 16:23:51 -0500 Subject: [PATCH 347/464] Replaced tabbed indentation with spaces for subversion module --- source_control/subversion.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source_control/subversion.py b/source_control/subversion.py index e3ff6dbfba5..cae4702e174 100644 --- a/source_control/subversion.py +++ b/source_control/subversion.py @@ -121,7 +121,7 @@ class Subversion(object): def checkout(self): '''Creates new svn working directory if it does not already exist.''' self._exec(["checkout", "-r", self.revision, self.repo, self.dest]) - + def export(self, force=False): '''Export svn repo to directory''' cmd = ["export"] From b6b576abf6c2e73c8fd4a5308c0cfff00f6d300d Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Tue, 30 Jun 2015 16:24:01 -0500 Subject: [PATCH 348/464] Replaced tabbed indentation with spaces for group module --- system/group.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/group.py b/system/group.py index d952cb5c28c..53ab5f904dc 100644 --- a/system/group.py +++ b/system/group.py @@ -121,7 +121,7 @@ class Group(object): if len(cmd) == 1: return (None, '', '') if self.module.check_mode: - return (0, '', '') + return (0, '', '') cmd.append(self.name) return self.execute_command(cmd) From c2d0fbd45ba882c8a211f645e22e029d8c0b8b2a Mon Sep 17 
00:00:00 2001 From: Matt Martz Date: Tue, 30 Jun 2015 16:24:23 -0500 Subject: [PATCH 349/464] Remove unnecessary imports in a docs only file for win_copy --- windows/win_copy.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/windows/win_copy.py b/windows/win_copy.py index efdebc5a4a6..acc6c9ef2e0 100644 --- a/windows/win_copy.py +++ b/windows/win_copy.py @@ -18,8 +18,6 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -import os -import time DOCUMENTATION = ''' --- From ed179fe379da90dafd8e89be94402511a899e49a Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 1 Jul 2015 10:39:50 -0400 Subject: [PATCH 350/464] now captures any exceptions when trying to create directories --- files/file.py | 44 ++++++++++++++++++++++++-------------------- 1 file changed, 24 insertions(+), 20 deletions(-) diff --git a/files/file.py b/files/file.py index ba5afd6809f..c3267f7f18b 100644 --- a/files/file.py +++ b/files/file.py @@ -271,26 +271,30 @@ def main(): module.exit_json(changed=True) changed = True curpath = '' - # Split the path so we can apply filesystem attributes recursively - # from the root (/) directory for absolute paths or the base path - # of a relative path. We can then walk the appropriate directory - # path to apply attributes. - for dirname in path.strip('/').split('/'): - curpath = '/'.join([curpath, dirname]) - # Remove leading slash if we're creating a relative path - if not os.path.isabs(path): - curpath = curpath.lstrip('/') - if not os.path.exists(curpath): - try: - os.mkdir(curpath) - except OSError, ex: - # Possibly something else created the dir since the os.path.exists - # check above. As long as it's a dir, we don't need to error out. 
- if not (ex.errno == errno.EEXISTS and os.isdir(curpath)): - raise - tmp_file_args = file_args.copy() - tmp_file_args['path']=curpath - changed = module.set_fs_attributes_if_different(tmp_file_args, changed) + + try: + # Split the path so we can apply filesystem attributes recursively + # from the root (/) directory for absolute paths or the base path + # of a relative path. We can then walk the appropriate directory + # path to apply attributes. + for dirname in path.strip('/').split('/'): + curpath = '/'.join([curpath, dirname]) + # Remove leading slash if we're creating a relative path + if not os.path.isabs(path): + curpath = curpath.lstrip('/') + if not os.path.exists(curpath): + try: + os.mkdir(curpath) + except OSError, ex: + # Possibly something else created the dir since the os.path.exists + # check above. As long as it's a dir, we don't need to error out. + if not (ex.errno == errno.EEXIST and os.path.isdir(curpath)): + raise + tmp_file_args = file_args.copy() + tmp_file_args['path']=curpath + changed = module.set_fs_attributes_if_different(tmp_file_args, changed) + except Exception, e: + module.fail_json(path=path, msg='There was an issue creating %s as requested: %s' % (curpath, str(e))) # We already know prev_state is not 'absent', therefore it exists in some form. elif prev_state != 'directory': From 5c17a99a1cbb31d1b834f2f623e87d851ab2a140 Mon Sep 17 00:00:00 2001 From: Mike Putnam Date: Wed, 1 Jul 2015 20:58:17 -0500 Subject: [PATCH 351/464] Upstream docs show launch_config_name as required. http://docs.aws.amazon.com/AutoScaling/latest/APIReference/API_AutoScalingGroup.html Fixes #11209 Ansible behavior is correct, this commit just updates the docs to reflect that correctness. 
--- cloud/amazon/ec2_asg.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_asg.py b/cloud/amazon/ec2_asg.py index 54d051375e6..eaeb141825e 100644 --- a/cloud/amazon/ec2_asg.py +++ b/cloud/amazon/ec2_asg.py @@ -43,7 +43,7 @@ options: launch_config_name: description: - Name of the Launch configuration to use for the group. See the ec2_lc module for managing these. - required: false + required: true min_size: description: - Minimum number of instances in group From ee782d822b6df37fbd19fd60ec1fc02c0de08265 Mon Sep 17 00:00:00 2001 From: whiter Date: Thu, 2 Jul 2015 15:24:39 +1000 Subject: [PATCH 352/464] Fixed dicts comparison for tags --- cloud/amazon/ec2_vpc_net.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_vpc_net.py b/cloud/amazon/ec2_vpc_net.py index 41186ed0ab2..ebdd4ed6504 100644 --- a/cloud/amazon/ec2_vpc_net.py +++ b/cloud/amazon/ec2_vpc_net.py @@ -145,7 +145,7 @@ def update_vpc_tags(vpc, module, vpc_obj, tags, name): tags.update({'Name': name}) try: current_tags = dict((t.name, t.value) for t in vpc.get_all_tags(filters={'resource-id': vpc_obj.id})) - if sorted(current_tags) != sorted(tags): + if cmp(tags, current_tags): vpc.create_tags(vpc_obj.id, tags) return True else: From 3d9a8caa5942b0efc8012b1b8080b5ce04bf652d Mon Sep 17 00:00:00 2001 From: Flyte Date: Wed, 1 Jul 2015 09:32:58 +0100 Subject: [PATCH 353/464] Provide correct kwargs to rds2 connection when making a final snapshot --- cloud/amazon/rds.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/cloud/amazon/rds.py b/cloud/amazon/rds.py index 71ead8ad10b..3d6f192b9ab 100644 --- a/cloud/amazon/rds.py +++ b/cloud/amazon/rds.py @@ -715,7 +715,10 @@ def delete_db_instance_or_snapshot(module, conn): if instance_name: if snapshot: params["skip_final_snapshot"] = False - params["final_snapshot_id"] = snapshot + if has_rds2: + params["final_db_snapshot_identifier"] = snapshot + else: + 
params["final_snapshot_id"] = snapshot else: params["skip_final_snapshot"] = True result = conn.delete_db_instance(instance_name, **params) From 8f0d462fd0e966fbb04e4fbcf4685a2fd600fee0 Mon Sep 17 00:00:00 2001 From: Robb Wagoner Date: Thu, 2 Jul 2015 06:16:43 -0700 Subject: [PATCH 354/464] remove double dict & fix increment bug --- cloud/amazon/ec2_elb_lb.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/cloud/amazon/ec2_elb_lb.py b/cloud/amazon/ec2_elb_lb.py index 9d626a98194..04be9e2813c 100644 --- a/cloud/amazon/ec2_elb_lb.py +++ b/cloud/amazon/ec2_elb_lb.py @@ -393,11 +393,11 @@ class ElbManager(object): # status of instances behind the ELB if info['instances']: - info['instance_health'] = [ dict({ - "instance_id": instance_state.instance_id, - "reason_code": instance_state.reason_code, - "state": instance_state.state, - }) for instance_state in self.elb_conn.describe_instance_health(self.name)] + info['instance_health'] = [ dict( + instance_id = instance_state.instance_id, + reason_code = instance_state.reason_code, + state = instance_state.state + ) for instance_state in self.elb_conn.describe_instance_health(self.name)] else: info['instance_health'] = [] @@ -409,7 +409,7 @@ class ElbManager(object): elif instance_state['state'] == "OutOfService": info['out_of_service_count'] += 1 else: - info['unknown_instance_state_count'] =+ 1 + info['unknown_instance_state_count'] += 1 if check_elb.health_check: info['health_check'] = { From 93754b903f6956a86891197debb83f801b809200 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 3 Jul 2015 14:43:21 -0400 Subject: [PATCH 355/464] updated upgrade to a more sensible default as the previous was prone to confusion fixes #1667 --- packaging/os/apt.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/packaging/os/apt.py b/packaging/os/apt.py index 9172c69763d..19a7c426f5e 100644 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -80,8 +80,8 @@ options: - 'Note: 
This does not upgrade a specific package, use state=latest for that.' version_added: "1.1" required: false - default: "yes" - choices: [ "yes", "safe", "full", "dist"] + default: "no" + choices: [ "no", "yes", "safe", "full", "dist"] dpkg_options: description: - Add dpkg options to apt command. Defaults to '-o "Dpkg::Options::=--force-confdef" -o "Dpkg::Options::=--force-confold"' @@ -548,7 +548,7 @@ def main(): default_release = dict(default=None, aliases=['default-release']), install_recommends = dict(default='yes', aliases=['install-recommends'], type='bool'), force = dict(default='no', type='bool'), - upgrade = dict(choices=['yes', 'safe', 'full', 'dist']), + upgrade = dict(choices=['no', 'yes', 'safe', 'full', 'dist']), dpkg_options = dict(default=DPKG_OPTIONS) ), mutually_exclusive = [['package', 'upgrade', 'deb']], @@ -572,6 +572,10 @@ def main(): APT_GET_CMD = module.get_bin_path("apt-get") p = module.params + + if p['upgrade'] == 'no': + p['upgrade'] = None + if not APTITUDE_CMD and p.get('upgrade', None) in [ 'full', 'safe', 'yes' ]: module.fail_json(msg="Could not find aptitude. Please ensure it is installed.") From c3c2e6ab726f9ea28a7a5d37b2a466740843bb9a Mon Sep 17 00:00:00 2001 From: Scott Miller Date: Sat, 4 Jul 2015 14:45:21 -0400 Subject: [PATCH 356/464] Update cloudformation.py Fix for inaccurate phrasing --- cloud/amazon/cloudformation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/cloudformation.py b/cloud/amazon/cloudformation.py index cccdd156f20..abde0ec375c 100644 --- a/cloud/amazon/cloudformation.py +++ b/cloud/amazon/cloudformation.py @@ -51,7 +51,7 @@ options: template: description: - The local path of the cloudformation template. This parameter is mutually exclusive with 'template_url'. Either one of them is required if "state" parameter is "present" - Must give full path to the file, relative to the playbook. 
If using roles this may look like "roles/cloudformation/files/cloudformation-example.json" + Must give full path to the file, relative to the working directory. If using roles this may look like "roles/cloudformation/files/cloudformation-example.json" required: false default: null aliases: [] From c786202ee4b6a89cc509348006bfcbeab90a9819 Mon Sep 17 00:00:00 2001 From: Tal Auslander Date: Mon, 6 Jul 2015 11:22:23 +0300 Subject: [PATCH 357/464] use ConvertTo-Bool for the force parameter --- windows/win_get_url.ps1 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/windows/win_get_url.ps1 b/windows/win_get_url.ps1 index 23463b681c0..02f19b39360 100644 --- a/windows/win_get_url.ps1 +++ b/windows/win_get_url.ps1 @@ -40,9 +40,9 @@ Else { Fail-Json $result "missing required argument: dest" } -$force = Get-Attr -obj $params -name "force" "no" +$force = Get-Attr -obj $params -name "force" "no" | ConvertTo-Bool -If ($force -eq "yes" -or -not (Test-Path $dest)) { +If ($force -or -not (Test-Path $dest)) { $client = New-Object System.Net.WebClient Try { From 110f618487de51ba41cf0ce94f2d5574c6f54d09 Mon Sep 17 00:00:00 2001 From: Juho-Mikko Pellinen Date: Wed, 15 Apr 2015 13:43:00 +0300 Subject: [PATCH 358/464] Add support for specifying unique hosted zone identifier --- cloud/amazon/route53.py | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/cloud/amazon/route53.py b/cloud/amazon/route53.py index b1c8591b25c..30557a2212a 100644 --- a/cloud/amazon/route53.py +++ b/cloud/amazon/route53.py @@ -35,6 +35,12 @@ options: required: true default: null aliases: [] + hosted_zone_id: + description: + - The Hosted Zone ID of the DNS zone to modify + required: false + default: null + aliases: [] record: description: - The full DNS record to create or delete @@ -195,6 +201,17 @@ EXAMPLES = ''' alias=True alias_hosted_zone_id="{{ elb_zone_id }}" +# Add an AAAA record with Hosted Zone ID. 
Note that because there are colons in the value +# that the entire parameter list must be quoted: +- route53: + command: "create" + zone: "foo.com" + hostes_zone_id: "Z2AABBCCDDEEFF" + record: "localhost.foo.com" + type: "AAAA" + ttl: "7200" + value: "::1" + # Use a routing policy to distribute traffic: - route53: command: "create" @@ -252,6 +269,7 @@ def main(): argument_spec.update(dict( command = dict(choices=['get', 'create', 'delete'], required=True), zone = dict(required=True), + hosted_zone_id = dict(required=False), record = dict(required=True), ttl = dict(required=False, type='int', default=3600), type = dict(choices=['A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'NS'], required=True), @@ -275,6 +293,7 @@ def main(): command_in = module.params.get('command') zone_in = module.params.get('zone').lower() + hosted_zone_id_in = module.params.get('hosted_zone_id') ttl_in = module.params.get('ttl') record_in = module.params.get('record').lower() type_in = module.params.get('type') @@ -360,6 +379,8 @@ def main(): record['region'] = rset.region record['failover'] = rset.failover record['health_check'] = rset.health_check + if hosted_zone_id_in: + record['hosted_zone_id'] = hosted_zone_id_in if rset.alias_dns_name: record['alias'] = True record['value'] = rset.alias_dns_name From 71ebe6321b241501e40f0908ce84daf7e918ac8d Mon Sep 17 00:00:00 2001 From: Juho-Mikko Pellinen Date: Wed, 15 Apr 2015 13:43:00 +0300 Subject: [PATCH 359/464] Add support for specifying unique hosted zone identifier --- cloud/amazon/route53.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/cloud/amazon/route53.py b/cloud/amazon/route53.py index 30557a2212a..8dd781ffdf2 100644 --- a/cloud/amazon/route53.py +++ b/cloud/amazon/route53.py @@ -212,6 +212,17 @@ EXAMPLES = ''' ttl: "7200" value: "::1" +# Add an AAAA record with Hosted Zone ID. 
Note that because there are colons in the value +# that the entire parameter list must be quoted: +- route53: + command: "create" + zone: "foo.com" + hostes_zone_id: "Z2AABBCCDDEEFF" + record: "localhost.foo.com" + type: "AAAA" + ttl: "7200" + value: "::1" + # Use a routing policy to distribute traffic: - route53: command: "create" @@ -374,6 +385,8 @@ def main(): record['ttl'] = rset.ttl record['value'] = ','.join(sorted(rset.resource_records)) record['values'] = sorted(rset.resource_records) + if hosted_zone_id_in: + record['hosted_zone_id'] = hosted_zone_id_in record['identifier'] = rset.identifier record['weight'] = rset.weight record['region'] = rset.region From f0ad6c5a1fd3f93d776097619231d1cd4860e520 Mon Sep 17 00:00:00 2001 From: Juho-Mikko Pellinen Date: Mon, 6 Jul 2015 13:02:24 +0300 Subject: [PATCH 360/464] Fix hosted_zone_id after rebase. --- cloud/amazon/route53.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/cloud/amazon/route53.py b/cloud/amazon/route53.py index 8dd781ffdf2..a981c6ef2be 100644 --- a/cloud/amazon/route53.py +++ b/cloud/amazon/route53.py @@ -250,13 +250,13 @@ try: except ImportError: HAS_BOTO = False -def get_zone_by_name(conn, module, zone_name, want_private): - """Finds a zone by name""" +def get_zone_by_name(conn, module, zone_name, want_private, zone_id): + """Finds a zone by name or zone_id""" for zone in conn.get_zones(): # only save this zone id if the private status of the zone matches # the private_zone_in boolean specified in the params private_zone = module.boolean(zone.config.get('PrivateZone', False)) - if private_zone == want_private and zone.name == zone_name: + if private_zone == want_private and ((zone.name == zone_name and zone_id == None) or zone.id.replace('/hostedzone/', '') == zone_id): return zone return None @@ -280,7 +280,7 @@ def main(): argument_spec.update(dict( command = dict(choices=['get', 'create', 'delete'], required=True), zone = dict(required=True), - hosted_zone_id = 
dict(required=False), + hosted_zone_id = dict(required=False, default=None), record = dict(required=True), ttl = dict(required=False, type='int', default=3600), type = dict(choices=['A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'NS'], required=True), @@ -351,7 +351,7 @@ def main(): module.fail_json(msg = e.error_message) # Find the named zone ID - zone = get_zone_by_name(conn, module, zone_in, private_zone_in) + zone = get_zone_by_name(conn, module, zone_in, private_zone_in, hosted_zone_id_in) # Verify that the requested zone is already defined in Route53 if zone is None: From 228c03bd670449813c3d3d45fa4a7767ad924774 Mon Sep 17 00:00:00 2001 From: Juho-Mikko Pellinen Date: Mon, 6 Jul 2015 13:07:33 +0300 Subject: [PATCH 361/464] Add version number --- cloud/amazon/route53.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cloud/amazon/route53.py b/cloud/amazon/route53.py index a981c6ef2be..e81e5083763 100644 --- a/cloud/amazon/route53.py +++ b/cloud/amazon/route53.py @@ -39,6 +39,7 @@ options: description: - The Hosted Zone ID of the DNS zone to modify required: false + version_added: 2.0 default: null aliases: [] record: From 041dc8b5877eabd130c79618dedcebb40d3c138b Mon Sep 17 00:00:00 2001 From: Juho-Mikko Pellinen Date: Mon, 6 Jul 2015 13:08:46 +0300 Subject: [PATCH 362/464] Remove empty aliases --- cloud/amazon/route53.py | 1 - 1 file changed, 1 deletion(-) diff --git a/cloud/amazon/route53.py b/cloud/amazon/route53.py index e81e5083763..f9702cc38ae 100644 --- a/cloud/amazon/route53.py +++ b/cloud/amazon/route53.py @@ -41,7 +41,6 @@ options: required: false version_added: 2.0 default: null - aliases: [] record: description: - The full DNS record to create or delete From 08b2f3191537b95b6056067aad0416ea0b881b82 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Tue, 31 Mar 2015 16:37:07 -0400 Subject: [PATCH 363/464] Add OpenStack Security Group Rule module --- cloud/openstack/os_security_group_rule.py | 154 ++++++++++++++++++++++ 1 file changed, 154 
insertions(+) create mode 100644 cloud/openstack/os_security_group_rule.py diff --git a/cloud/openstack/os_security_group_rule.py b/cloud/openstack/os_security_group_rule.py new file mode 100644 index 00000000000..d539cf91ee5 --- /dev/null +++ b/cloud/openstack/os_security_group_rule.py @@ -0,0 +1,154 @@ +#!/usr/bin/python + +# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. +# Copyright (c) 2013, Benno Joy +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . 
+ +try: + import shade +except ImportError: + print("failed=True msg='shade is required for this module'") + + +DOCUMENTATION = ''' +--- +module: os_security_group_rule +short_description: Add/Delete rule from an existing security group +extends_documentation_fragment: openstack +version_added: "1.10" +description: + - Add or Remove rule from an existing security group +options: + security_group: + description: + - Name of the security group + required: true + protocol: + description: + - IP protocol + choices: ['tcp', 'udp', 'icmp'] + default: tcp + port_range_min: + description: + - Starting port + required: true + port_range_max: + description: + - Ending port + required: true + remote_ip_prefix: + description: + - Source IP address(es) in CIDR notation (exclusive with remote_group) + required: false + remote_group: + description: + - ID of Security group to link (exclusive with remote_ip_prefix) + required: false + state: + description: + - Should the resource be present or absent. + choices: [present, absent] + default: present + +requirements: ["shade"] +''' +# TODO(mordred): add ethertype and direction + +EXAMPLES = ''' +# Create a security group rule +- os_security_group_rule: cloud=mordred + security_group=group foo + protocol: tcp + port_range_min: 80 + port_range_max: 80 + remote_ip_prefix: 0.0.0.0/0 +''' + + +def _security_group_rule(module, nova_client, action='create', **kwargs): + f = getattr(nova_client.security_group_rules, action) + try: + secgroup = f(**kwargs) + except Exception, e: + module.fail_json(msg='Failed to %s security group rule: %s' % + (action, e.message)) + + +def _get_rule_from_group(module, secgroup): + for rule in secgroup.rules: + if (rule['ip_protocol'] == module.params['protocol'] and + rule['from_port'] == module.params['port_range_min'] and + rule['to_port'] == module.params['port_range_max'] and + rule['ip_range']['cidr'] == module.params['remote_ip_prefix']): + return rule + return None + +def main(): + + argument_spec = 
openstack_full_argument_spec( + security_group = dict(required=True), + protocol = dict(default='tcp', choices=['tcp', 'udp', 'icmp']), + port_range_min = dict(required=True), + port_range_max = dict(required=True), + remote_ip_prefix = dict(required=False, default=None), + # TODO(mordred): Make remote_group handle name and id + remote_group = dict(required=False, default=None), + state = dict(default='present', choices=['absent', 'present']), + ) + module_kwargs = openstack_module_kwargs( + mutually_exclusive=[ + ['remote_ip_prefix', 'remote_group'], + ) + module = AnsibleModule(argument_spec, **module_kwargs) + + try: + cloud = shade.openstack_cloud(**module.params) + nova_client = cloud.nova_client + changed = False + + secgroup = cloud.get_security_group(module.params['security_group']) + + if module.params['state'] == 'present': + if not secgroup: + module.fail_json(msg='Could not find security group %s' % + module.params['security_group']) + + if not _get_rule_from_group(module, secgroup): + _security_group_rule(module, nova_client, 'create', + parent_group_id=secgroup.id, + ip_protocol=module.params['protocol'], + from_port=module.params['port_range_min'], + to_port=module.params['port_range_max'], + cidr=module.params['remote_ip'], + group_id=module.params['remote_group'], + changed = True + + + if module.params['state'] == 'absent' and secgroup: + rule = _get_rule_from_group(module, secgroup) + if secgroup and rule: + _security_group_rule(module, nova_client, 'delete', + rule=rule['id']) + changed = True + + module.exit_json(changed=changed, result="success") + + except shade.OpenStackCloudException as e: + module.fail_json(msg=e.message) + +# this is magic, see lib/ansible/module_common.py +from ansible.module_utils.basic import * +from ansible.module_utils.openstack import * +main() From 08b4bb42c4ad99a9c43193b40f62220240da0af8 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Thu, 4 Jun 2015 12:03:05 -0400 Subject: [PATCH 364/464] Fix example code 
syntax --- cloud/openstack/os_security_group_rule.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/cloud/openstack/os_security_group_rule.py b/cloud/openstack/os_security_group_rule.py index d539cf91ee5..8422a920791 100644 --- a/cloud/openstack/os_security_group_rule.py +++ b/cloud/openstack/os_security_group_rule.py @@ -68,12 +68,13 @@ requirements: ["shade"] EXAMPLES = ''' # Create a security group rule -- os_security_group_rule: cloud=mordred - security_group=group foo - protocol: tcp - port_range_min: 80 - port_range_max: 80 - remote_ip_prefix: 0.0.0.0/0 +- os_security_group_rule: + cloud: mordred + security_group: foo + protocol: tcp + port_range_min: 80 + port_range_max: 80 + remote_ip_prefix: 0.0.0.0/0 ''' From a9301ba918736db08cf5b2b160f91ed4724ba7e8 Mon Sep 17 00:00:00 2001 From: Davide Guerri Date: Thu, 4 Jun 2015 17:34:21 +0100 Subject: [PATCH 365/464] Fix invalid syntax in openstack_module_kwargs call --- cloud/openstack/os_security_group_rule.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cloud/openstack/os_security_group_rule.py b/cloud/openstack/os_security_group_rule.py index 8422a920791..903d694bab3 100644 --- a/cloud/openstack/os_security_group_rule.py +++ b/cloud/openstack/os_security_group_rule.py @@ -111,6 +111,7 @@ def main(): module_kwargs = openstack_module_kwargs( mutually_exclusive=[ ['remote_ip_prefix', 'remote_group'], + ] ) module = AnsibleModule(argument_spec, **module_kwargs) From d35df1f2170f8347af4b49548a03d265c9b69e15 Mon Sep 17 00:00:00 2001 From: dagnello Date: Mon, 8 Jun 2015 18:27:40 -0700 Subject: [PATCH 366/464] Minor fixes for os_security_group_rule module Was not able to use this module as it was. The changes submitted resolved the issues I ran into in order to get it working. 
--- cloud/openstack/os_security_group_rule.py | 24 +++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/cloud/openstack/os_security_group_rule.py b/cloud/openstack/os_security_group_rule.py index 903d694bab3..fc5397439c0 100644 --- a/cloud/openstack/os_security_group_rule.py +++ b/cloud/openstack/os_security_group_rule.py @@ -21,13 +21,12 @@ try: except ImportError: print("failed=True msg='shade is required for this module'") - DOCUMENTATION = ''' --- module: os_security_group_rule short_description: Add/Delete rule from an existing security group extends_documentation_fragment: openstack -version_added: "1.10" +version_added: "2.0" description: - Add or Remove rule from an existing security group options: @@ -61,7 +60,6 @@ options: - Should the resource be present or absent. choices: [present, absent] default: present - requirements: ["shade"] ''' # TODO(mordred): add ethertype and direction @@ -84,7 +82,7 @@ def _security_group_rule(module, nova_client, action='create', **kwargs): secgroup = f(**kwargs) except Exception, e: module.fail_json(msg='Failed to %s security group rule: %s' % - (action, e.message)) + (action, e.message)) def _get_rule_from_group(module, secgroup): @@ -92,12 +90,14 @@ def _get_rule_from_group(module, secgroup): if (rule['ip_protocol'] == module.params['protocol'] and rule['from_port'] == module.params['port_range_min'] and rule['to_port'] == module.params['port_range_max'] and - rule['ip_range']['cidr'] == module.params['remote_ip_prefix']): + (rule['ip_range']['cidr'] if 'cidr' in rule['ip_range'] + else None) == (module.params['remote_ip_prefix'] if + 'remote_ip_prefix' in module.params else None)): return rule return None -def main(): +def main(): argument_spec = openstack_full_argument_spec( security_group = dict(required=True), protocol = dict(default='tcp', choices=['tcp', 'udp', 'icmp']), @@ -133,11 +133,14 @@ def main(): ip_protocol=module.params['protocol'], 
from_port=module.params['port_range_min'], to_port=module.params['port_range_max'], - cidr=module.params['remote_ip'], - group_id=module.params['remote_group'], + cidr=module.params['remote_ip_prefix'] + if 'remote_ip_prefix' in module.params else None, + group_id=module.params['remote_group'] + if 'remote_group' in module.params else + None + ) changed = True - if module.params['state'] == 'absent' and secgroup: rule = _get_rule_from_group(module, secgroup) if secgroup and rule: @@ -153,4 +156,5 @@ def main(): # this is magic, see lib/ansible/module_common.py from ansible.module_utils.basic import * from ansible.module_utils.openstack import * -main() + +main() \ No newline at end of file From b98e6663e8cdd29eecb6614ae12a814df972441e Mon Sep 17 00:00:00 2001 From: dagnello Date: Mon, 8 Jun 2015 18:27:40 -0700 Subject: [PATCH 367/464] Minor fixes for os_security_group_rule module Was not able to use this module as it was. The changes submitted resolved the issues I ran into in order to get it working. 
--- cloud/openstack/os_security_group_rule.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/cloud/openstack/os_security_group_rule.py b/cloud/openstack/os_security_group_rule.py index fc5397439c0..efbc41f1148 100644 --- a/cloud/openstack/os_security_group_rule.py +++ b/cloud/openstack/os_security_group_rule.py @@ -99,14 +99,14 @@ def _get_rule_from_group(module, secgroup): def main(): argument_spec = openstack_full_argument_spec( - security_group = dict(required=True), - protocol = dict(default='tcp', choices=['tcp', 'udp', 'icmp']), - port_range_min = dict(required=True), - port_range_max = dict(required=True), - remote_ip_prefix = dict(required=False, default=None), + security_group = dict(required=True), + protocol = dict(default='tcp', choices=['tcp', 'udp', 'icmp']), + port_range_min = dict(required=True), + port_range_max = dict(required=True), + remote_ip_prefix = dict(required=False, default=None), # TODO(mordred): Make remote_group handle name and id - remote_group = dict(required=False, default=None), - state = dict(default='present', choices=['absent', 'present']), + remote_group = dict(required=False, default=None), + state = dict(default='present', choices=['absent', 'present']), ) module_kwargs = openstack_module_kwargs( mutually_exclusive=[ @@ -157,4 +157,4 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.openstack import * -main() \ No newline at end of file +main() From 8f2e70a1c156bde17a117c5a212c71d4d67ccd8f Mon Sep 17 00:00:00 2001 From: David Shrewsbury Date: Wed, 10 Jun 2015 17:31:43 -0400 Subject: [PATCH 368/464] Update rules mode for latest shade Shade 0.7.0 normalized the security group data that is returned, when using nova, to look more like neutron security group data. This adjusts for that change. 
--- cloud/openstack/os_security_group_rule.py | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/cloud/openstack/os_security_group_rule.py b/cloud/openstack/os_security_group_rule.py index efbc41f1148..42e0a6bc6ed 100644 --- a/cloud/openstack/os_security_group_rule.py +++ b/cloud/openstack/os_security_group_rule.py @@ -86,13 +86,11 @@ def _security_group_rule(module, nova_client, action='create', **kwargs): def _get_rule_from_group(module, secgroup): - for rule in secgroup.rules: - if (rule['ip_protocol'] == module.params['protocol'] and - rule['from_port'] == module.params['port_range_min'] and - rule['to_port'] == module.params['port_range_max'] and - (rule['ip_range']['cidr'] if 'cidr' in rule['ip_range'] - else None) == (module.params['remote_ip_prefix'] if - 'remote_ip_prefix' in module.params else None)): + for rule in secgroup['security_group_rules']: + if (rule['protocol'] == module.params['protocol'] and + rule['port_range_min'] == module.params['port_range_min'] and + rule['port_range_max'] == module.params['port_range_max'] and + rule['remote_ip_prefix'] == module.params['remote_ip_prefix']): return rule return None @@ -136,9 +134,8 @@ def main(): cidr=module.params['remote_ip_prefix'] if 'remote_ip_prefix' in module.params else None, group_id=module.params['remote_group'] - if 'remote_group' in module.params else - None - ) + if 'remote_group' in module.params else None + ) changed = True if module.params['state'] == 'absent' and secgroup: From 5758b4ebdce00bbd18ef8e6967122ebcc6de0cde Mon Sep 17 00:00:00 2001 From: David Shrewsbury Date: Wed, 10 Jun 2015 17:51:59 -0400 Subject: [PATCH 369/464] Fix id value reference --- cloud/openstack/os_security_group_rule.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/openstack/os_security_group_rule.py b/cloud/openstack/os_security_group_rule.py index 42e0a6bc6ed..287f3021a35 100644 --- a/cloud/openstack/os_security_group_rule.py +++ 
b/cloud/openstack/os_security_group_rule.py @@ -127,7 +127,7 @@ def main(): if not _get_rule_from_group(module, secgroup): _security_group_rule(module, nova_client, 'create', - parent_group_id=secgroup.id, + parent_group_id=secgroup['id'], ip_protocol=module.params['protocol'], from_port=module.params['port_range_min'], to_port=module.params['port_range_max'], From 5b6c6cac20bc6e1111e0175b2e77c7c3f61a69b5 Mon Sep 17 00:00:00 2001 From: David Shrewsbury Date: Wed, 10 Jun 2015 18:06:11 -0400 Subject: [PATCH 370/464] Recognize None and -1 port equivalency shade 0.7.0 represents disabled min/max ports as None (in the neutron style) rather than -1. Recognize this as the same as -1. --- cloud/openstack/os_security_group_rule.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/cloud/openstack/os_security_group_rule.py b/cloud/openstack/os_security_group_rule.py index 287f3021a35..64f67fbeec1 100644 --- a/cloud/openstack/os_security_group_rule.py +++ b/cloud/openstack/os_security_group_rule.py @@ -87,9 +87,12 @@ def _security_group_rule(module, nova_client, action='create', **kwargs): def _get_rule_from_group(module, secgroup): for rule in secgroup['security_group_rules']: + # No port, or -1, will be returned as None + port_range_min = rule['port_range_min'] or -1 + port_range_max = rule['port_range_max'] or -1 if (rule['protocol'] == module.params['protocol'] and - rule['port_range_min'] == module.params['port_range_min'] and - rule['port_range_max'] == module.params['port_range_max'] and + port_range_min == module.params['port_range_min'] and + port_range_max == module.params['port_range_max'] and rule['remote_ip_prefix'] == module.params['remote_ip_prefix']): return rule return None From 16b3b72294ec509b7d327af73ce750c9c25c437a Mon Sep 17 00:00:00 2001 From: David Shrewsbury Date: Tue, 16 Jun 2015 14:56:04 -0400 Subject: [PATCH 371/464] Update secgroup rules module for latest shade This allows the rules module to work against either nova or
neutron for handling security groups. New parameters for 'direction' and 'ethertype' are added. Check mode is supported with this version. --- cloud/openstack/os_security_group_rule.py | 143 ++++++++++++++-------- 1 file changed, 94 insertions(+), 49 deletions(-) diff --git a/cloud/openstack/os_security_group_rule.py b/cloud/openstack/os_security_group_rule.py index 64f67fbeec1..a5596558710 100644 --- a/cloud/openstack/os_security_group_rule.py +++ b/cloud/openstack/os_security_group_rule.py @@ -46,15 +46,27 @@ options: port_range_max: description: - Ending port - required: true + required: true remote_ip_prefix: description: - Source IP address(es) in CIDR notation (exclusive with remote_group) - required: false + required: false remote_group: description: - ID of Security group to link (exclusive with remote_ip_prefix) - required: false + required: false + ethertype: + description: + - Must be IPv4 or IPv6, and addresses represented in CIDR must + match the ingress or egress rules. Not all providers support IPv6. + choices: ['IPv4', 'IPv6'] + default: IPv4 + direction: + description: + - The direction in which the security group rule is applied. Not + all providers support egress. + choices: ['egress', 'ingress'] + default: ingress state: description: - Should the resource be present or absent. @@ -76,79 +88,112 @@ EXAMPLES = ''' ''' -def _security_group_rule(module, nova_client, action='create', **kwargs): - f = getattr(nova_client.security_group_rules, action) - try: - secgroup = f(**kwargs) - except Exception, e: - module.fail_json(msg='Failed to %s security group rule: %s' % - (action, e.message)) +def _find_matching_rule(module, secgroup): + """ + Find a rule in the group that matches the module parameters. + :returns: The matching rule dict, or None if no matches. 
+ """ + protocol = module.params['protocol'] + port_range_min = module.params['port_range_min'] + port_range_max = module.params['port_range_max'] + remote_ip_prefix = module.params['remote_ip_prefix'] + ethertype = module.params['ethertype'] + direction = module.params['direction'] -def _get_rule_from_group(module, secgroup): for rule in secgroup['security_group_rules']: - # No port, or -1, will be returned as None - port_range_min = rule['port_range_min'] or -1 - port_range_max = rule['port_range_max'] or -1 - if (rule['protocol'] == module.params['protocol'] and - port_range_min == module.params['port_range_min'] and - port_range_max == module.params['port_range_max'] and - rule['remote_ip_prefix'] == module.params['remote_ip_prefix']): + # No port, or -1, will be returned from shade as None + rule_port_range_min = rule['port_range_min'] or -1 + rule_port_range_max = rule['port_range_max'] or -1 + + if (protocol == rule['protocol'] + and port_range_min == rule_port_range_min + and port_range_max == rule_port_range_max + and remote_ip_prefix == rule['remote_ip_prefix'] + and ethertype == rule['ethertype'] + and direction == rule['direction']): return rule return None +def _system_state_change(module, secgroup): + state = module.params['state'] + if secgroup: + rule_exists = _find_matching_rule(module, secgroup) + else: + return False + + if state == 'present' and not rule_exists: + return True + if state == 'absent' and rule_exists: + return True + return False + + def main(): argument_spec = openstack_full_argument_spec( - security_group = dict(required=True), - protocol = dict(default='tcp', choices=['tcp', 'udp', 'icmp']), - port_range_min = dict(required=True), - port_range_max = dict(required=True), - remote_ip_prefix = dict(required=False, default=None), + security_group = dict(required=True), + protocol = dict(default='tcp', + choices=['tcp', 'udp', 'icmp']), + port_range_min = dict(required=True), + port_range_max = dict(required=True), + remote_ip_prefix 
= dict(required=False, default=None), # TODO(mordred): Make remote_group handle name and id - remote_group = dict(required=False, default=None), - state = dict(default='present', choices=['absent', 'present']), + remote_group = dict(required=False, default=None), + ethertype = dict(default='IPv4', + choices=['IPv4', 'IPv6']), + direction = dict(default='ingress', + choices=['egress', 'ingress']), + state = dict(default='present', + choices=['absent', 'present']), ) + module_kwargs = openstack_module_kwargs( mutually_exclusive=[ ['remote_ip_prefix', 'remote_group'], ] ) - module = AnsibleModule(argument_spec, **module_kwargs) + + module = AnsibleModule(argument_spec, + supports_check_mode=True, + **module_kwargs) + + state = module.params['state'] + security_group = module.params['security_group'] + changed = False try: cloud = shade.openstack_cloud(**module.params) - nova_client = cloud.nova_client - changed = False + secgroup = cloud.get_security_group(security_group) - secgroup = cloud.get_security_group(module.params['security_group']) + if module.check_mode: + module.exit_json(changed=_system_state_change(module, secgroup)) - if module.params['state'] == 'present': + if state == 'present': if not secgroup: module.fail_json(msg='Could not find security group %s' % - module.params['security_group']) - - if not _get_rule_from_group(module, secgroup): - _security_group_rule(module, nova_client, 'create', - parent_group_id=secgroup['id'], - ip_protocol=module.params['protocol'], - from_port=module.params['port_range_min'], - to_port=module.params['port_range_max'], - cidr=module.params['remote_ip_prefix'] - if 'remote_ip_prefix' in module.params else None, - group_id=module.params['remote_group'] - if 'remote_group' in module.params else None - ) + security_group) + + if not _find_matching_rule(module, secgroup): + cloud.create_security_group_rule( + secgroup['id'], + port_range_min=module.params['port_range_min'], + port_range_max=module.params['port_range_max'], + 
protocol=module.params['protocol'], + remote_ip_prefix=module.params['remote_ip_prefix'], + remote_group_id=module.params['remote_group'], + direction=module.params['direction'], + ethertype=module.params['ethertype'] + ) changed = True - if module.params['state'] == 'absent' and secgroup: - rule = _get_rule_from_group(module, secgroup) - if secgroup and rule: - _security_group_rule(module, nova_client, 'delete', - rule=rule['id']) + if state == 'absent' and secgroup: + rule = _find_matching_rule(module, secgroup) + if rule: + cloud.delete_security_group_rule(rule['id']) changed = True - module.exit_json(changed=changed, result="success") + module.exit_json(changed=changed) except shade.OpenStackCloudException as e: module.fail_json(msg=e.message) From 0e5942d7e7bfd703fad5797362a0ebe1572674e6 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Wed, 17 Jun 2015 07:39:27 -0400 Subject: [PATCH 372/464] Return rule object --- cloud/openstack/os_security_group_rule.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/cloud/openstack/os_security_group_rule.py b/cloud/openstack/os_security_group_rule.py index a5596558710..15ce00866ae 100644 --- a/cloud/openstack/os_security_group_rule.py +++ b/cloud/openstack/os_security_group_rule.py @@ -174,8 +174,9 @@ def main(): module.fail_json(msg='Could not find security group %s' % security_group) - if not _find_matching_rule(module, secgroup): - cloud.create_security_group_rule( + rule = _find_matching_rule(module, secgroup): + if not rule: + rule = cloud.create_security_group_rule( secgroup['id'], port_range_min=module.params['port_range_min'], port_range_max=module.params['port_range_max'], @@ -186,6 +187,7 @@ def main(): ethertype=module.params['ethertype'] ) changed = True + module.exit_json(changed=changed, rule=rule, id=rule.id) if state == 'absent' and secgroup: rule = _find_matching_rule(module, secgroup) @@ -193,7 +195,7 @@ def main(): cloud.delete_security_group_rule(rule['id']) changed = True - 
module.exit_json(changed=changed) + module.exit_json(changed=changed) except shade.OpenStackCloudException as e: module.fail_json(msg=e.message) From 9d0c8b0507a19e82a5dc23c9a8a8cac0b24c9f92 Mon Sep 17 00:00:00 2001 From: David Shrewsbury Date: Wed, 17 Jun 2015 08:30:55 -0400 Subject: [PATCH 373/464] Fix syntax error --- cloud/openstack/os_security_group_rule.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/openstack/os_security_group_rule.py b/cloud/openstack/os_security_group_rule.py index 15ce00866ae..f50c97817e5 100644 --- a/cloud/openstack/os_security_group_rule.py +++ b/cloud/openstack/os_security_group_rule.py @@ -174,7 +174,7 @@ def main(): module.fail_json(msg='Could not find security group %s' % security_group) - rule = _find_matching_rule(module, secgroup): + rule = _find_matching_rule(module, secgroup) if not rule: rule = cloud.create_security_group_rule( secgroup['id'], From f027e759765e9dd7717b54c308ad1d46410c2cff Mon Sep 17 00:00:00 2001 From: David Shrewsbury Date: Thu, 18 Jun 2015 19:22:56 -0400 Subject: [PATCH 374/464] Compare ports as strings Ports as returned from shade are ints. They are strings as they come in to the module. 
--- cloud/openstack/os_security_group_rule.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/cloud/openstack/os_security_group_rule.py b/cloud/openstack/os_security_group_rule.py index f50c97817e5..eea47c0c3e1 100644 --- a/cloud/openstack/os_security_group_rule.py +++ b/cloud/openstack/os_security_group_rule.py @@ -103,8 +103,16 @@ def _find_matching_rule(module, secgroup): for rule in secgroup['security_group_rules']: # No port, or -1, will be returned from shade as None - rule_port_range_min = rule['port_range_min'] or -1 - rule_port_range_max = rule['port_range_max'] or -1 + if rule['port_range_min'] is None: + rule_port_range_min = "-1" + else: + rule_port_range_min = str(rule['port_range_min']) + + if rule['port_range_max'] is None: + rule_port_range_max = "-1" + else: + rule_port_range_max = str(rule['port_range_max']) + if (protocol == rule['protocol'] and port_range_min == rule_port_range_min From 2e8daa23309ada2bfba8415ea6ec5d764b565f05 Mon Sep 17 00:00:00 2001 From: dagnello Date: Fri, 19 Jun 2015 11:21:51 -0700 Subject: [PATCH 375/464] Resolving issues in rule comparison algorithm Port range min/max values are at times represented as string and compared to int equivalents. This fix explicitly ensures all port range values are ints for proper comparisons. --- cloud/openstack/os_security_group_rule.py | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/cloud/openstack/os_security_group_rule.py b/cloud/openstack/os_security_group_rule.py index eea47c0c3e1..fc9283940c0 100644 --- a/cloud/openstack/os_security_group_rule.py +++ b/cloud/openstack/os_security_group_rule.py @@ -91,12 +91,11 @@ EXAMPLES = ''' def _find_matching_rule(module, secgroup): """ Find a rule in the group that matches the module parameters. - :returns: The matching rule dict, or None if no matches. 
""" protocol = module.params['protocol'] - port_range_min = module.params['port_range_min'] - port_range_max = module.params['port_range_max'] + port_range_min = int(module.params['port_range_min']) + port_range_max = int(module.params['port_range_max']) remote_ip_prefix = module.params['remote_ip_prefix'] ethertype = module.params['ethertype'] direction = module.params['direction'] @@ -104,14 +103,14 @@ def _find_matching_rule(module, secgroup): for rule in secgroup['security_group_rules']: # No port, or -1, will be returned from shade as None if rule['port_range_min'] is None: - rule_port_range_min = "-1" + rule_port_range_min = -1 else: - rule_port_range_min = str(rule['port_range_min']) + rule_port_range_min = int(rule['port_range_min']) if rule['port_range_max'] is None: - rule_port_range_max = "-1" + rule_port_range_max = -1 else: - rule_port_range_max = str(rule['port_range_max']) + rule_port_range_max = int(rule['port_range_max']) if (protocol == rule['protocol'] @@ -195,7 +194,7 @@ def main(): ethertype=module.params['ethertype'] ) changed = True - module.exit_json(changed=changed, rule=rule, id=rule.id) + module.exit_json(changed=changed, rule=rule, id=rule['id']) if state == 'absent' and secgroup: rule = _find_matching_rule(module, secgroup) @@ -212,4 +211,4 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.openstack import * -main() +main() \ No newline at end of file From 9f03302b68b4038fa664230dcbb66920325dbd1f Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Fri, 19 Jun 2015 17:17:46 -0400 Subject: [PATCH 376/464] Use int in the parameter list instead of casting --- cloud/openstack/os_security_group_rule.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/cloud/openstack/os_security_group_rule.py b/cloud/openstack/os_security_group_rule.py index fc9283940c0..7d86408b379 100644 --- a/cloud/openstack/os_security_group_rule.py +++ b/cloud/openstack/os_security_group_rule.py @@ -94,8 +94,8 @@ 
def _find_matching_rule(module, secgroup): :returns: The matching rule dict, or None if no matches. """ protocol = module.params['protocol'] - port_range_min = int(module.params['port_range_min']) - port_range_max = int(module.params['port_range_max']) + port_range_min = module.params['port_range_min'] + port_range_max = module.params['port_range_max'] remote_ip_prefix = module.params['remote_ip_prefix'] ethertype = module.params['ethertype'] direction = module.params['direction'] @@ -142,8 +142,8 @@ def main(): security_group = dict(required=True), protocol = dict(default='tcp', choices=['tcp', 'udp', 'icmp']), - port_range_min = dict(required=True), - port_range_max = dict(required=True), + port_range_min = dict(required=True, type='int'), + port_range_max = dict(required=True, type='int'), remote_ip_prefix = dict(required=False, default=None), # TODO(mordred): Make remote_group handle name and id remote_group = dict(required=False, default=None), @@ -211,4 +211,4 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.openstack import * -main() \ No newline at end of file +main() From 8664c884174736b803089c3d4a199461dff0af9e Mon Sep 17 00:00:00 2001 From: David Shrewsbury Date: Tue, 30 Jun 2015 16:51:18 -0400 Subject: [PATCH 377/464] Change required parameters for rules module The ports and protocol are no longer required (and now depends on a new version of shade). 
--- cloud/openstack/os_security_group_rule.py | 55 ++++++++++++++++++++--- 1 file changed, 49 insertions(+), 6 deletions(-) diff --git a/cloud/openstack/os_security_group_rule.py b/cloud/openstack/os_security_group_rule.py index 7d86408b379..2ec8e49b68d 100644 --- a/cloud/openstack/os_security_group_rule.py +++ b/cloud/openstack/os_security_group_rule.py @@ -18,8 +18,10 @@ try: import shade + HAS_SHADE = True except ImportError: - print("failed=True msg='shade is required for this module'") + HAS_SHADE = False + DOCUMENTATION = ''' --- @@ -87,6 +89,41 @@ EXAMPLES = ''' remote_ip_prefix: 0.0.0.0/0 ''' +RETURN = ''' +id: + description: Unique rule UUID. + type: string +direction: + description: The direction in which the security group rule is applied. + type: string + sample: 'egress' +ethertype: + description: One of IPv4 or IPv6. + type: string + sample: 'IPv4' +port_range_min: + description: The minimum port number in the range that is matched by + the security group rule. + type: int + sample: 8000 +port_range_max: + description: The maximum port number in the range that is matched by + the security group rule. + type: int + sample: 8000 +protocol: + description: The protocol that is matched by the security group rule. + type: string + sample: 'tcp' +remote_ip_prefix: + description: The remote IP prefix to be associated with this security group rule. + type: string + sample: '0.0.0.0/0' +security_group_id: + description: The security group ID to associate with this security group rule. 
+ type: string +''' + def _find_matching_rule(module, secgroup): """ @@ -140,10 +177,12 @@ def _system_state_change(module, secgroup): def main(): argument_spec = openstack_full_argument_spec( security_group = dict(required=True), - protocol = dict(default='tcp', - choices=['tcp', 'udp', 'icmp']), - port_range_min = dict(required=True, type='int'), - port_range_max = dict(required=True, type='int'), + # NOTE(Shrews): None is an acceptable protocol value for + # Neutron, but Nova will balk at this. + protocol = dict(default=None, + choices=[None, 'tcp', 'udp', 'icmp']), + port_range_min = dict(required=False, type='int'), + port_range_max = dict(required=False, type='int'), remote_ip_prefix = dict(required=False, default=None), # TODO(mordred): Make remote_group handle name and id remote_group = dict(required=False, default=None), @@ -165,6 +204,9 @@ def main(): supports_check_mode=True, **module_kwargs) + if not HAS_SHADE: + module.fail_json(msg='shade is required for this module') + state = module.params['state'] security_group = module.params['security_group'] changed = False @@ -211,4 +253,5 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.openstack import * -main() +if __name__ == '__main__': + main() From 6933407cd40bb655eaaa6847336421018a6b9b1e Mon Sep 17 00:00:00 2001 From: David Shrewsbury Date: Mon, 6 Jul 2015 12:16:29 -0400 Subject: [PATCH 378/464] Correct port matching logic Port matching logic did not take into account recent shade change to equate (None, None) to (1, 65535) when Nova is the backend. Also, this encapsulates the port matching logic into a single function and heavily documents the logic. 
--- cloud/openstack/os_security_group_rule.py | 102 ++++++++++++++++++---- 1 file changed, 84 insertions(+), 18 deletions(-) diff --git a/cloud/openstack/os_security_group_rule.py b/cloud/openstack/os_security_group_rule.py index 2ec8e49b68d..7e0486d81db 100644 --- a/cloud/openstack/os_security_group_rule.py +++ b/cloud/openstack/os_security_group_rule.py @@ -76,7 +76,6 @@ options: default: present requirements: ["shade"] ''' -# TODO(mordred): add ethertype and direction EXAMPLES = ''' # Create a security group rule @@ -87,6 +86,38 @@ EXAMPLES = ''' port_range_min: 80 port_range_max: 80 remote_ip_prefix: 0.0.0.0/0 + +# Create a security group rule for ping +- os_security_group_rule: + cloud: mordred + security_group: foo + protocol: icmp + remote_ip_prefix: 0.0.0.0/0 + +# Another way to create the ping rule +- os_security_group_rule: + cloud: mordred + security_group: foo + protocol: icmp + port_range_min: -1 + port_range_max: -1 + remote_ip_prefix: 0.0.0.0/0 + +# Create a TCP rule covering all ports +- os_security_group_rule: + cloud: mordred + security_group: foo + protocol: tcp + port_range_min: 1 + port_range_max: 65535 + remote_ip_prefix: 0.0.0.0/0 + +# Another way to create the TCP rule above (defaults to all ports) +- os_security_group_rule: + cloud: mordred + security_group: foo + protocol: tcp + remote_ip_prefix: 0.0.0.0/0 ''' RETURN = ''' @@ -125,37 +156,72 @@ security_group_id: ''' +def _ports_match(protocol, module_min, module_max, rule_min, rule_max): + """ + Capture the complex port matching logic. + + The port values coming in for the module might be -1 (for ICMP), + which will work only for Nova, but this is handled by shade. Likewise, + they might be None, which works for Neutron, but not Nova. This too is + handled by shade. Since shade will consistently return these port + values as None, we need to convert any -1 values input to the module + to None here for comparison. 
+ + For TCP and UDP protocols, None values for both min and max are + represented as the range 1-65535 for Nova, but remain None for + Neutron. Shade returns the full range when Nova is the backend (since + that is how Nova stores them), and None values for Neutron. If None + values are input to the module for both values, then we need to adjust + for comparison. + """ + + # Check if the user is supplying -1 for ICMP. + if protocol == 'icmp': + if module_min and int(module_min) == -1: + module_min = None + if module_max and int(module_max) == -1: + module_max = None + + # Check if user is supplying None values for full TCP/UDP port range. + if protocol in ['tcp', 'udp'] and module_min is None and module_max is None: + if (rule_min and int(rule_min) == 1 + and rule_max and int(rule_max) == 65535): + # (None, None) == (1, 65535) + return True + + # Sanity check to make sure we don't have type comparison issues. + if module_min: + module_min = int(module_min) + if module_max: + module_max = int(module_max) + if rule_min: + rule_min = int(rule_min) + if rule_max: + rule_max = int(rule_max) + + return module_min == rule_min and module_max == rule_max + + def _find_matching_rule(module, secgroup): """ Find a rule in the group that matches the module parameters. :returns: The matching rule dict, or None if no matches. 
""" protocol = module.params['protocol'] - port_range_min = module.params['port_range_min'] - port_range_max = module.params['port_range_max'] remote_ip_prefix = module.params['remote_ip_prefix'] ethertype = module.params['ethertype'] direction = module.params['direction'] for rule in secgroup['security_group_rules']: - # No port, or -1, will be returned from shade as None - if rule['port_range_min'] is None: - rule_port_range_min = -1 - else: - rule_port_range_min = int(rule['port_range_min']) - - if rule['port_range_max'] is None: - rule_port_range_max = -1 - else: - rule_port_range_max = int(rule['port_range_max']) - - if (protocol == rule['protocol'] - and port_range_min == rule_port_range_min - and port_range_max == rule_port_range_max and remote_ip_prefix == rule['remote_ip_prefix'] and ethertype == rule['ethertype'] - and direction == rule['direction']): + and direction == rule['direction'] + and _ports_match(protocol, + module.params['port_range_min'], + module.params['port_range_max'], + rule['port_range_min'], + rule['port_range_max'])): return rule return None From dd9c29286154d7643c0392d576e44ea4421ada3c Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Mon, 6 Jul 2015 18:52:11 -0400 Subject: [PATCH 379/464] Update docstring to show port ranges as optional --- cloud/openstack/os_security_group_rule.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/cloud/openstack/os_security_group_rule.py b/cloud/openstack/os_security_group_rule.py index 7e0486d81db..91059aca015 100644 --- a/cloud/openstack/os_security_group_rule.py +++ b/cloud/openstack/os_security_group_rule.py @@ -39,16 +39,18 @@ options: protocol: description: - IP protocol - choices: ['tcp', 'udp', 'icmp'] - default: tcp + choices: ['tcp', 'udp', 'icmp', None] + default: None port_range_min: description: - Starting port - required: true + required: false + default: None port_range_max: description: - Ending port - required: true + required: false + default: None 
remote_ip_prefix: description: - Source IP address(es) in CIDR notation (exclusive with remote_group) From b4911a47d1589693791bdf40ed979e239d69d6d2 Mon Sep 17 00:00:00 2001 From: Juho-Mikko Pellinen Date: Tue, 7 Jul 2015 16:31:47 +0300 Subject: [PATCH 380/464] Change the default flag value to None to prevent AWS complaining: "Instance creation failed => InvalidBlockDeviceMapping: the encrypted flag cannot be specified since device /dev/sda1 has a snapshot specified." --- cloud/amazon/ec2.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index b79395fb3a1..840cf4fed1f 100644 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -701,7 +701,7 @@ def create_block_device(module, ec2, volume): volume_type=volume.get('device_type'), delete_on_termination=volume.get('delete_on_termination', False), iops=volume.get('iops'), - encrypted=volume.get('encrypted', False)) + encrypted=volume.get('encrypted', None)) def boto_supports_param_in_spot_request(ec2, param): """ From c57d70ad207e1c5a935f7af7a93a890f44e364d1 Mon Sep 17 00:00:00 2001 From: Sean Chittenden Date: Tue, 7 Jul 2015 12:06:52 -0700 Subject: [PATCH 381/464] Fix group mod and group add for FreeBSD --- system/group.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/system/group.py b/system/group.py index 53ab5f904dc..d97dd2176ac 100644 --- a/system/group.py +++ b/system/group.py @@ -233,7 +233,8 @@ class FreeBsdGroup(Group): def group_add(self, **kwargs): cmd = [self.module.get_bin_path('pw', True), 'groupadd', self.name] if self.gid is not None: - cmd.append('-g %d' % int(self.gid)) + cmd.append('-g') + cmd.append('%d' % int(self.gid)) return self.execute_command(cmd) def group_mod(self, **kwargs): @@ -241,7 +242,8 @@ class FreeBsdGroup(Group): info = self.group_info() cmd_len = len(cmd) if self.gid is not None and int(self.gid) != info[2]: - cmd.append('-g %d' % int(self.gid)) + cmd.append('-g') + cmd.append('%d' % 
int(self.gid)) # modify the group if cmd will do anything if cmd_len != len(cmd): if self.module.check_mode: From d9db201b9a60dd7680694ae9af113ae21082306b Mon Sep 17 00:00:00 2001 From: Tom Paine Date: Tue, 7 Jul 2015 20:30:07 +0100 Subject: [PATCH 382/464] Update vsphere_guest.py --- cloud/vmware/vsphere_guest.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index 8d1b7946688..863755aab26 100644 --- a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -71,7 +71,7 @@ options: from_template: version_added: "1.9" description: - - Specifies if the VM should be deployed from a template (cannot be run with state). No guest customization changes to hardware such as CPU, RAM, NICs or Disks can be applied when launching from template. + - Specifies if the VM should be deployed from a template (mutually exclusive with state parameter). No guest customization changes to hardware such as CPU, RAM, NICs or Disks can be applied when launching from template. default: no choices: ['yes', 'no'] template_src: From d6d6186aef83ea807a25124e6fd423d7df42a998 Mon Sep 17 00:00:00 2001 From: Tom Paine Date: Tue, 7 Jul 2015 20:30:33 +0100 Subject: [PATCH 383/464] Update vsphere_guest.py --- cloud/vmware/vsphere_guest.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index 863755aab26..5f5925b994d 100644 --- a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -71,7 +71,7 @@ options: from_template: version_added: "1.9" description: - - Specifies if the VM should be deployed from a template (mutually exclusive with state parameter). No guest customization changes to hardware such as CPU, RAM, NICs or Disks can be applied when launching from template. + - Specifies if the VM should be deployed from a template (mutually exclusive with 'state' parameter). 
No guest customization changes to hardware such as CPU, RAM, NICs or Disks can be applied when launching from template. default: no choices: ['yes', 'no'] template_src: From 76398781bac86caf6006e67a77a917155a02f3b4 Mon Sep 17 00:00:00 2001 From: Jesse Keating Date: Tue, 7 Jul 2015 15:29:47 -0700 Subject: [PATCH 384/464] Fix up docs --- cloud/openstack/os_floating_ip.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/openstack/os_floating_ip.py b/cloud/openstack/os_floating_ip.py index 5bd29240a67..10827012ae8 100644 --- a/cloud/openstack/os_floating_ip.py +++ b/cloud/openstack/os_floating_ip.py @@ -64,12 +64,12 @@ options: - When attaching a floating IP address, specify whether we should wait for it to appear as attached. required: false - default false + default: false timeout: description: - Time to wait for an IP address to appear as attached. See wait. required: false - default 60 + default: 60 state: description: - Should the resource be present or absent. From de89f9f99a9e2679f3f8ab8aaf84afb163f4c375 Mon Sep 17 00:00:00 2001 From: Jesse Keating Date: Tue, 7 Jul 2015 16:10:44 -0700 Subject: [PATCH 385/464] Plumb ipv6 modes into os_subnet Shade already supports these, we just need to plumb them into the module code. --- cloud/openstack/os_subnet.py | 38 ++++++++++++++++++++++++++++++++++-- 1 file changed, 36 insertions(+), 2 deletions(-) diff --git a/cloud/openstack/os_subnet.py b/cloud/openstack/os_subnet.py index f96ce9fd633..b62eb10b0cc 100644 --- a/cloud/openstack/os_subnet.py +++ b/cloud/openstack/os_subnet.py @@ -92,6 +92,18 @@ options: - A list of host route dictionaries for the subnet. 
required: false default: None + ipv6_ra_mode: + description: + - IPv6 router advertisement mode + choices: ['dhcpv6-stateful', 'dhcpv6-stateless', 'slaac'] + required: false + default: None + ipv6_address_mode: + description: + - IPv6 address mode + choices: ['dhcpv6-stateful', 'dhcpv6-stateless', 'slaac'] + required: false + default: None requirements: - "python >= 2.6" - "shade" @@ -117,6 +129,19 @@ EXAMPLES = ''' - os_subnet: state=absent name=net1subnet + +# Create an ipv6 stateless subnet +- os_subnet: + state: present + name: intv6 + network_name: internal + ip_version: 6 + cidr: 2db8:1::/64 + dns_nameservers: + - 2001:4860:4860::8888 + - 2001:4860:4860::8844 + ipv6_ra_mode: dhcpv6-stateless + ipv6_address_mode: dhcpv6-stateless ''' @@ -163,6 +188,7 @@ def _system_state_change(module, subnet): def main(): + ipv6_mode_choices = ['dhcpv6-stateful', 'dhcpv6-stateless', 'slaac'] argument_spec = openstack_full_argument_spec( name=dict(required=True), network_name=dict(default=None), @@ -174,6 +200,8 @@ def main(): allocation_pool_start=dict(default=None), allocation_pool_end=dict(default=None), host_routes=dict(default=None, type='list'), + ipv6_ra_mode=dict(default=None, choice=ipv6_mode_choices), + ipv6_address_mode=dict(default=None, choice=ipv6_mode_choices), state=dict(default='present', choices=['absent', 'present']), ) @@ -196,6 +224,8 @@ def main(): pool_start = module.params['allocation_pool_start'] pool_end = module.params['allocation_pool_end'] host_routes = module.params['host_routes'] + ipv6_ra_mode = module.params['ipv6_ra_mode'] + ipv6_a_mode = module.params['ipv6_address_mode'] # Check for required parameters when state == 'present' if state == 'present': @@ -226,7 +256,9 @@ def main(): gateway_ip=gateway_ip, dns_nameservers=dns, allocation_pools=pool, - host_routes=host_routes) + host_routes=host_routes, + ipv6_ra_mode=ipv6_ra_mode, + ipv6_address_mode=ipv6_a_mode) changed = True else: if _needs_update(subnet, module): @@ -236,7 +268,9 @@ def 
main(): gateway_ip=gateway_ip, dns_nameservers=dns, allocation_pools=pool, - host_routes=host_routes) + host_routes=host_routes, + ipv6_ra_mode=ipv6_ra_mode, + ipv6_address_mode=ipv6_a_mode) changed = True else: changed = False From 706f5e25cc7c045b817cc940a439b57b3c570a06 Mon Sep 17 00:00:00 2001 From: Nic O'Connor Date: Sat, 31 Jan 2015 15:24:44 +0000 Subject: [PATCH 386/464] Added the ability to Linked_clone from snapshot --- cloud/vmware/vsphere_guest.py | 37 +++++++++++++++++++++++++++++------ 1 file changed, 31 insertions(+), 6 deletions(-) diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index 8ad7df41dea..58393ecb754 100644 --- a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -79,6 +79,18 @@ options: description: - Name of the source template to deploy from default: None + linked_clone: + version_added: "2.0" + description: + - Boolean. Creates a linked clone copy of the specified vm requires snapshot + required: false + default: false + snapshot: + version_added: "2.0" + description: + - Name of the snapshot you want to link clone from + required: false + default: none vm_disk: description: - A key, value list of disks and their sizes and which datastore to keep it in. 
@@ -513,7 +525,7 @@ def vmdisk_id(vm, current_datastore_name): return id_list -def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, module, cluster_name): +def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, module, cluster_name, linked_clone, snapshot): vmTemplate = vsphere_client.get_vm_by_name(template_src) vmTarget = None @@ -545,9 +557,14 @@ def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, mo try: if vmTarget: changed = False + elif linked_clone and snapshot != None: + #Check linked_clone and snapshot value + vmTemplate.clone(guest, resourcepool=rpmor, linked=linked_clone, snapshot=snapshot) + changed = True else: vmTemplate.clone(guest, resourcepool=rpmor) changed = True + vsphere_client.disconnect() module.exit_json(changed=changed) except Exception as e: @@ -1148,9 +1165,11 @@ def main(): 'reconfigured' ], default='present'), - vmware_guest_facts=dict(required=False, choices=BOOLEANS), - from_template=dict(required=False, choices=BOOLEANS), + vmware_guest_facts=dict(required=False, type='bool'), + from_template=dict(required=False, type='bool'), template_src=dict(required=False, type='str'), + linked_clone=dict(required=False, default=False, type='bool'), + snapshot=dict(required=False, default=None, type='str'), guest=dict(required=True, type='str'), vm_disk=dict(required=False, type='dict', default={}), vm_nic=dict(required=False, type='dict', default={}), @@ -1159,7 +1178,7 @@ def main(): vm_hw_version=dict(required=False, default=None, type='str'), resource_pool=dict(required=False, default=None, type='str'), cluster=dict(required=False, default=None, type='str'), - force=dict(required=False, choices=BOOLEANS, default=False), + force=dict(required=False, type='bool', default=False), esxi=dict(required=False, type='dict', default={}), @@ -1176,8 +1195,9 @@ def main(): 'esxi' ], ['resource_pool', 'cluster'], - ['from_template', 'resource_pool', 'template_src'] + 
['from_template', 'resource_pool', 'template_src'], ], + required_if=[('linked_clone', True, ['snapshot'])], ) if not HAS_PYSPHERE: @@ -1200,6 +1220,9 @@ def main(): cluster = module.params['cluster'] template_src = module.params['template_src'] from_template = module.params['from_template'] + linked_clone = module.params['linked_clone'] + snapshot = module.params['snapshot'] + # CONNECT TO THE SERVER viserver = VIServer() @@ -1279,7 +1302,9 @@ def main(): guest=guest, template_src=template_src, module=module, - cluster_name=cluster + cluster_name=cluster, + linked_clone=linked_clone, + snapshot=snapshot ) if state in ['restarted', 'reconfigured']: module.fail_json( From e33f0930753876be85f8f851e3cfdc81e219a73c Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 9 Jul 2015 00:27:44 -0400 Subject: [PATCH 387/464] added ignore hidden to assemble --- files/assemble.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/files/assemble.py b/files/assemble.py index 1f9a952d04a..ad73c7b4354 100644 --- a/files/assemble.py +++ b/files/assemble.py @@ -79,6 +79,12 @@ options: U(http://docs.python.org/2/library/re.html). required: false default: null + ignore_hidden: + description: + - A boolean that controls if files that start with a '.' will be included or not. 
+ required: false + default: false + version_added: "2.0" author: "Stephen Fromm (@sfromm)" extends_documentation_fragment: files ''' @@ -94,7 +100,7 @@ EXAMPLES = ''' # =========================================== # Support method -def assemble_from_fragments(src_path, delimiter=None, compiled_regexp=None): +def assemble_from_fragments(src_path, delimiter=None, compiled_regexp=None, ignore_hidden=False): ''' assemble a file from a directory of fragments ''' tmpfd, temp_path = tempfile.mkstemp() tmp = os.fdopen(tmpfd,'w') @@ -105,7 +111,7 @@ def assemble_from_fragments(src_path, delimiter=None, compiled_regexp=None): if compiled_regexp and not compiled_regexp.search(f): continue fragment = "%s/%s" % (src_path, f) - if not os.path.isfile(fragment): + if not os.path.isfile(fragment) or (ignore_hidden and os.path.basename(fragment).startswith('.')): continue fragment_content = file(fragment).read() @@ -148,6 +154,7 @@ def main(): backup=dict(default=False, type='bool'), remote_src=dict(default=False, type='bool'), regexp = dict(required=False), + ignore_hidden = dict(default=False, type='bool'), ), add_file_common_args=True ) @@ -162,6 +169,7 @@ def main(): delimiter = module.params['delimiter'] regexp = module.params['regexp'] compiled_regexp = None + ignore_hidden = module.params['ignore_hidden'] if not os.path.exists(src): module.fail_json(msg="Source (%s) does not exist" % src) @@ -175,7 +183,7 @@ def main(): except re.error, e: module.fail_json(msg="Invalid Regexp (%s) in \"%s\"" % (e, regexp)) - path = assemble_from_fragments(src, delimiter, compiled_regexp) + path = assemble_from_fragments(src, delimiter, compiled_regexp, ignore_hidden) path_hash = module.sha1(path) if os.path.exists(dest): From f190f98b06a00abc3a3ba0432a52ec44a1924f86 Mon Sep 17 00:00:00 2001 From: "Hennadiy (Gena) Verkh" Date: Thu, 9 Jul 2015 11:39:46 +0200 Subject: [PATCH 388/464] Update uri.py Added methods 'TRACE', 'CONNECT' from https://www.rfc-editor.org/rfc/rfc2616.txt, section 5.1.1 
Added method 'REFRESH' --- network/basics/uri.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/network/basics/uri.py b/network/basics/uri.py index 7be1cc92159..1e70d319fd0 100644 --- a/network/basics/uri.py +++ b/network/basics/uri.py @@ -73,6 +73,7 @@ options: description: - The HTTP method of the request or response. required: false + choices: [ "GET", "POST", "PUT", "HEAD", "DELETE", "OPTIONS", "PATCH", "TRACE", "CONNECT", "REFRESH" ] default: "GET" return_content: description: @@ -340,7 +341,7 @@ def main(): password = dict(required=False, default=None), body = dict(required=False, default=None), body_format = dict(required=False, default='raw', choices=['raw', 'json']), - method = dict(required=False, default='GET'), + method = dict(required=False, default='GET', choices=['GET', 'POST', 'PUT', 'HEAD', 'DELETE', 'OPTIONS', 'PATCH', 'TRACE', 'CONNECT', 'REFRESH']), return_content = dict(required=False, default='no', type='bool'), force_basic_auth = dict(required=False, default='no', type='bool'), follow_redirects = dict(required=False, default='safe', choices=['all', 'safe', 'none', 'yes', 'no']), From fbb6277a37faa78ab8a01dee0e7877af372234ce Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Thu, 9 Jul 2015 09:33:10 -0400 Subject: [PATCH 389/464] Fix a small typo in parameter processing --- cloud/openstack/os_client_config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/openstack/os_client_config.py b/cloud/openstack/os_client_config.py index 2c4af5c8c08..7128b06ffcb 100644 --- a/cloud/openstack/os_client_config.py +++ b/cloud/openstack/os_client_config.py @@ -61,7 +61,7 @@ def main(): config = os_client_config.OpenStackConfig() clouds = [] for cloud in config.get_all_clouds(): - if not module.params['clouds'] or cloud.name in module.param['clouds']: + if not p['clouds'] or cloud.name in p['clouds']: cloud.config['name'] = cloud.name clouds.append(cloud.config) 
module.exit_json(ansible_facts=dict(openstack=dict(clouds=clouds))) From 164043bd6db6c40fcece8614f8fce3fb66cdeed7 Mon Sep 17 00:00:00 2001 From: vanga Date: Thu, 9 Jul 2015 20:42:54 +0530 Subject: [PATCH 390/464] Throw error if encryption is set while passing a snapshot id --- cloud/amazon/ec2.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index 840cf4fed1f..a6b378c7e9c 100644 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -692,6 +692,8 @@ def create_block_device(module, ec2, volume): size = volume.get('volume_size', snapshot.volume_size) if int(volume['iops']) > MAX_IOPS_TO_SIZE_RATIO * size: module.fail_json(msg = 'IOPS must be at most %d times greater than size' % MAX_IOPS_TO_SIZE_RATIO) + if 'encrypted' in volume: + module.fail_json(msg = 'You can not set encryption when creating a volume from a snapshot') if 'ephemeral' in volume: if 'snapshot' in volume: module.fail_json(msg = 'Cannot set both ephemeral and snapshot') @@ -702,7 +704,6 @@ def create_block_device(module, ec2, volume): delete_on_termination=volume.get('delete_on_termination', False), iops=volume.get('iops'), encrypted=volume.get('encrypted', None)) - def boto_supports_param_in_spot_request(ec2, param): """ Check if Boto library has a in its request_spot_instances() method. For example, the placement_group parameter wasn't added until 2.3.0.
From d46c036b75a82a07c42731154677512d069c4386 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 9 Jul 2015 08:16:17 -0700 Subject: [PATCH 391/464] Add notes about loop squashing and 1.9.2 change to install packages in one yum transaction --- packaging/os/yum.py | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/packaging/os/yum.py b/packaging/os/yum.py index 14339b4c18b..29d6b0100dc 100644 --- a/packaging/os/yum.py +++ b/packaging/os/yum.py @@ -118,10 +118,22 @@ options: choices: ["yes", "no"] aliases: [] -notes: [] +notes: + - When used with a loop of package names in a playbook, ansible optimizes + the call to the yum module. Instead of calling the module with a single + package each time through the loop, ansible calls the module once with all + of the package names from the loop. + - In versions prior to 1.9.2 this module installed and removed each package + given to the yum module separately. This caused problems when packages + specified by filename or url had to be installed or removed together. In + 1.9.2 this was fixed so that packages are installed in one yum + transaction. However, if one of the packages adds a new yum repository + that the other packages come from (such as epel-release) then that package + needs to be installed in a separate task. This mimics yum's command line + behaviour. 
# informational: requirements for nodes requirements: [ yum ] -author: +author: - "Ansible Core Team" - "Seth Vidal" ''' From 59225ca7b021611ace3f1212cfd578c0cd520559 Mon Sep 17 00:00:00 2001 From: Chris Meyers Date: Thu, 9 Jul 2015 10:43:11 -0400 Subject: [PATCH 392/464] ensure password or ssh cert specified --- cloud/azure/azure.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/azure/azure.py b/cloud/azure/azure.py index f1eea46525e..c4fa41a6eb1 100644 --- a/cloud/azure/azure.py +++ b/cloud/azure/azure.py @@ -567,8 +567,8 @@ def main(): module.fail_json(msg='location parameter is required for new instance') if not module.params.get('storage_account'): module.fail_json(msg='storage_account parameter is required for new instance') - if not module.params.get('password'): - module.fail_json(msg='password parameter is required for new instance') + if not (module.params.get('password') or module.params.get('ssh_cert_path')): + module.fail_json(msg='password or ssh_cert_path parameter is required for new instance') (changed, public_dns_name, deployment) = create_virtual_machine(module, azure) module.exit_json(changed=changed, public_dns_name=public_dns_name, deployment=json.loads(json.dumps(deployment, default=lambda o: o.__dict__))) From 68bd17b15e94a74cc70ebb49d6161bbb0254c487 Mon Sep 17 00:00:00 2001 From: Evan Carter Date: Thu, 9 Jul 2015 15:29:00 -0400 Subject: [PATCH 393/464] Adding default cooldown to AWS ASG --- cloud/amazon/ec2_asg.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_asg.py b/cloud/amazon/ec2_asg.py index eaeb141825e..5cf0282011c 100644 --- a/cloud/amazon/ec2_asg.py +++ b/cloud/amazon/ec2_asg.py @@ -109,6 +109,12 @@ options: default: EC2 version_added: "1.7" choices: ['EC2', 'ELB'] + default_cooldown: + description: + The number of seconds after a scaling activity completes before another can begin. 
+ required: false + default: 300 seconds + version_added: "2.0" wait_timeout: description: - how long before wait instances to become viable when replaced. Used in concjunction with instance_ids option. @@ -374,6 +380,7 @@ def create_autoscaling_group(connection, module): set_tags = module.params.get('tags') health_check_period = module.params.get('health_check_period') health_check_type = module.params.get('health_check_type') + default_cooldown = module.params.get('default_cooldown') wait_for_instances = module.params.get('wait_for_instances') as_groups = connection.get_all_groups(names=[group_name]) wait_timeout = module.params.get('wait_timeout') @@ -413,7 +420,8 @@ def create_autoscaling_group(connection, module): connection=connection, tags=asg_tags, health_check_period=health_check_period, - health_check_type=health_check_type) + health_check_type=health_check_type, + default_cooldown=default_cooldown) try: connection.create_auto_scaling_group(ag) @@ -774,6 +782,7 @@ def main(): tags=dict(type='list', default=[]), health_check_period=dict(type='int', default=300), health_check_type=dict(default='EC2', choices=['EC2', 'ELB']), + default_cooldown=dict(type='int', default=300), wait_for_instances=dict(type='bool', default=True) ), ) From 9144785c42c082172aeb72544ec68503e39d788c Mon Sep 17 00:00:00 2001 From: Nicholas O'Connor Date: Thu, 9 Jul 2015 16:46:39 -0400 Subject: [PATCH 394/464] Created option snapshot_to_clone. When specified, snapshot_to_clone will create a linked clone copy of the VM. 
--- cloud/vmware/vsphere_guest.py | 30 ++++++++++-------------------- 1 file changed, 10 insertions(+), 20 deletions(-) diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index 58393ecb754..8ddc0fa7e50 100644 --- a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -79,16 +79,10 @@ options: description: - Name of the source template to deploy from default: None - linked_clone: - version_added: "2.0" + snapshot_to_clone: + version_added 2.0 description: - - Boolean. Creates a linked clone copy of the specified vm requires snapshot - required: false - default: false - snapshot: - version_added: "2.0" - description: - - Name of the snapshot you want to link clone from + - String. When specified, snapshot_to_clone will create a linked clone copy of the VM, Snapshot must already be taken in vCenter. required: false default: none vm_disk: @@ -525,7 +519,7 @@ def vmdisk_id(vm, current_datastore_name): return id_list -def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, module, cluster_name, linked_clone, snapshot): +def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, module, cluster_name, snapshot_to_clone): vmTemplate = vsphere_client.get_vm_by_name(template_src) vmTarget = None @@ -557,9 +551,9 @@ def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, mo try: if vmTarget: changed = False - elif linked_clone and snapshot != None: - #Check linked_clone and snapshot value - vmTemplate.clone(guest, resourcepool=rpmor, linked=linked_clone, snapshot=snapshot) + elif snapshot_to_clone != None: + #check if snapshot_to_clone is specified, Create a Linked Clone instead of a full clone. 
+ vmTemplate.clone(guest, resourcepool=rpmor, linked=True, snapshot=snapshot_to_clone) changed = True else: vmTemplate.clone(guest, resourcepool=rpmor) @@ -1168,8 +1162,7 @@ def main(): vmware_guest_facts=dict(required=False, type='bool'), from_template=dict(required=False, type='bool'), template_src=dict(required=False, type='str'), - linked_clone=dict(required=False, default=False, type='bool'), - snapshot=dict(required=False, default=None, type='str'), + snapshot_to_clone=dict(required=False, default=None, type='str'), guest=dict(required=True, type='str'), vm_disk=dict(required=False, type='dict', default={}), vm_nic=dict(required=False, type='dict', default={}), @@ -1197,7 +1190,6 @@ def main(): ['resource_pool', 'cluster'], ['from_template', 'resource_pool', 'template_src'], ], - required_if=[('linked_clone', True, ['snapshot'])], ) if not HAS_PYSPHERE: @@ -1220,8 +1212,7 @@ def main(): cluster = module.params['cluster'] template_src = module.params['template_src'] from_template = module.params['from_template'] - linked_clone = module.params['linked_clone'] - snapshot = module.params['snapshot'] + snapshot_to_clone = module.params['snapshot_to_clone'] # CONNECT TO THE SERVER @@ -1303,8 +1294,7 @@ def main(): template_src=template_src, module=module, cluster_name=cluster, - linked_clone=linked_clone, - snapshot=snapshot + snapshot_to_clone=snapshot_to_clone ) if state in ['restarted', 'reconfigured']: module.fail_json( From 440b395f377972e7c9de66f74ad1875f51c50fd0 Mon Sep 17 00:00:00 2001 From: Chris Faulkner Date: Thu, 9 Jul 2015 13:04:13 -0700 Subject: [PATCH 395/464] Report change status on django_manage collectstatic. 
--- web_infrastructure/django_manage.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/web_infrastructure/django_manage.py b/web_infrastructure/django_manage.py index 201cd08303b..2637446d6f3 100644 --- a/web_infrastructure/django_manage.py +++ b/web_infrastructure/django_manage.py @@ -165,6 +165,9 @@ def syncdb_filter_output(line): def migrate_filter_output(line): return ("Migrating forwards " in line) or ("Installed" in line and "Installed 0 object" not in line) +def collectstatic_filter_output(line): + return "0 static files" not in line + def main(): command_allowed_param_map = dict( cleanup=(), From 4b1b10fa20217cb2e22d88f94d0b176a49dceebb Mon Sep 17 00:00:00 2001 From: Iiro Uusitalo Date: Tue, 7 Oct 2014 13:04:34 +0300 Subject: [PATCH 396/464] Refactor force basic auth, now all modules which use fetch_url() can use force_basic_auth --- network/basics/get_url.py | 12 ++++++++++++ network/basics/uri.py | 10 ---------- 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/network/basics/get_url.py b/network/basics/get_url.py index 646c0e42784..9ab039ebb4b 100644 --- a/network/basics/get_url.py +++ b/network/basics/get_url.py @@ -110,6 +110,15 @@ options: parameter is not specified, the C(url_password) parameter will not be used. required: false version_added: '1.6' + force_basic_auth: + description: + - httplib2, the library used by the uri module only sends authentication information when a webservice + responds to an initial request with a 401 status. Since some basic auth services do not properly + send a 401, logins will fail. This option forces the sending of the Basic authentication header + upon initial request. 
+ required: false + choices: [ "yes", "no" ] + default: "no" others: description: - all arguments accepted by the M(file) module also work here @@ -125,6 +134,9 @@ EXAMPLES=''' - name: download file with sha256 check get_url: url=http://example.com/path/file.conf dest=/etc/foo.conf sha256sum=b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c + +- name: download file and force basic auth + get_url: url=http://example.com/path/file.conf dest=/etc/foo.conf force_basic_auth=yes ''' import urlparse diff --git a/network/basics/uri.py b/network/basics/uri.py index 8095eaffe67..bd1557c7a0f 100644 --- a/network/basics/uri.py +++ b/network/basics/uri.py @@ -23,7 +23,6 @@ import cgi import shutil import tempfile -import base64 import datetime try: import json @@ -369,7 +368,6 @@ def main(): body_format = dict(required=False, default='raw', choices=['raw', 'json']), method = dict(required=False, default='GET', choices=['GET', 'POST', 'PUT', 'HEAD', 'DELETE', 'OPTIONS', 'PATCH']), return_content = dict(required=False, default='no', type='bool'), - force_basic_auth = dict(required=False, default='no', type='bool'), follow_redirects = dict(required=False, default='safe', choices=['all', 'safe', 'none', 'yes', 'no']), creates = dict(required=False, default=None), removes = dict(required=False, default=None), @@ -394,7 +392,6 @@ def main(): method = module.params['method'] dest = module.params['dest'] return_content = module.params['return_content'] - force_basic_auth = module.params['force_basic_auth'] redirects = module.params['follow_redirects'] creates = module.params['creates'] removes = module.params['removes'] @@ -434,13 +431,6 @@ def main(): module.exit_json(stdout="skipped, since %s does not exist" % removes, changed=False, stderr=False, rc=0) - # httplib2 only sends authentication after the server asks for it with a 401. - # Some 'basic auth' servies fail to send a 401 and require the authentication - # up front. 
This creates the Basic authentication header and sends it immediately. - if force_basic_auth: - dict_headers["Authorization"] = "Basic {0}".format(base64.b64encode("{0}:{1}".format(user, password))) - - # Make the request resp, content, dest = uri(module, url, dest, user, password, body, body_format, method, dict_headers, redirects, socket_timeout, validate_certs) resp['status'] = int(resp['status']) From afd02221845cea71f21d01a3d0a00d00e6548648 Mon Sep 17 00:00:00 2001 From: Iiro Uusitalo Date: Fri, 10 Jul 2015 08:42:01 +0300 Subject: [PATCH 397/464] uri.py is not using module_utils/urls.py from ansible core --- network/basics/uri.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/network/basics/uri.py b/network/basics/uri.py index bd1557c7a0f..8095eaffe67 100644 --- a/network/basics/uri.py +++ b/network/basics/uri.py @@ -23,6 +23,7 @@ import cgi import shutil import tempfile +import base64 import datetime try: import json @@ -368,6 +369,7 @@ def main(): body_format = dict(required=False, default='raw', choices=['raw', 'json']), method = dict(required=False, default='GET', choices=['GET', 'POST', 'PUT', 'HEAD', 'DELETE', 'OPTIONS', 'PATCH']), return_content = dict(required=False, default='no', type='bool'), + force_basic_auth = dict(required=False, default='no', type='bool'), follow_redirects = dict(required=False, default='safe', choices=['all', 'safe', 'none', 'yes', 'no']), creates = dict(required=False, default=None), removes = dict(required=False, default=None), @@ -392,6 +394,7 @@ def main(): method = module.params['method'] dest = module.params['dest'] return_content = module.params['return_content'] + force_basic_auth = module.params['force_basic_auth'] redirects = module.params['follow_redirects'] creates = module.params['creates'] removes = module.params['removes'] @@ -431,6 +434,13 @@ def main(): module.exit_json(stdout="skipped, since %s does not exist" % removes, changed=False, stderr=False, rc=0) + # httplib2 only sends authentication 
after the server asks for it with a 401. + # Some 'basic auth' servies fail to send a 401 and require the authentication + # up front. This creates the Basic authentication header and sends it immediately. + if force_basic_auth: + dict_headers["Authorization"] = "Basic {0}".format(base64.b64encode("{0}:{1}".format(user, password))) + + # Make the request resp, content, dest = uri(module, url, dest, user, password, body, body_format, method, dict_headers, redirects, socket_timeout, validate_certs) resp['status'] = int(resp['status']) From 9acf10face033dda6d5b1f570fb35cbd3deabac5 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 10 Jul 2015 13:51:04 -0400 Subject: [PATCH 398/464] Correctly default crypt_scheme in htpasswd --- web_infrastructure/htpasswd.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web_infrastructure/htpasswd.py b/web_infrastructure/htpasswd.py index e567a776559..361a131ef2d 100644 --- a/web_infrastructure/htpasswd.py +++ b/web_infrastructure/htpasswd.py @@ -190,7 +190,7 @@ def main(): path=dict(required=True, aliases=["dest", "destfile"]), name=dict(required=True, aliases=["username"]), password=dict(required=False, default=None), - crypt_scheme=dict(required=False, default=None), + crypt_scheme=dict(required=False, default="apr_md5_crypt"), state=dict(required=False, default="present"), create=dict(type='bool', default='yes'), From 10df7b97eebe358d4ee716f76aa401587f023f0b Mon Sep 17 00:00:00 2001 From: Joel Thompson Date: Wed, 20 May 2015 15:39:17 -0400 Subject: [PATCH 399/464] Adding ability to filter AWS Route 53 private hosted zones by attached VPC --- cloud/amazon/route53.py | 34 +++++++++++++++++++++++++++++++--- 1 file changed, 31 insertions(+), 3 deletions(-) diff --git a/cloud/amazon/route53.py b/cloud/amazon/route53.py index f9702cc38ae..e3f6c42735a 100644 --- a/cloud/amazon/route53.py +++ b/cloud/amazon/route53.py @@ -138,6 +138,15 @@ options: required: false default: null version_added: "2.0" + vpc_id: + 
description: + - When used in conjunction with private_zone: true, this will only modify + records in the private hosted zone attached to this VPC. This allows you + to have multiple private hosted zones, all with the same name, attached + to different VPCs. + required: false + default: null + version_added: "2.0" author: "Bruce Pennypacker (@bpennypacker)" extends_documentation_fragment: aws ''' @@ -250,14 +259,26 @@ try: except ImportError: HAS_BOTO = False -def get_zone_by_name(conn, module, zone_name, want_private, zone_id): +def get_zone_by_name(conn, module, zone_name, want_private, zone_id, want_vpc_id): """Finds a zone by name or zone_id""" for zone in conn.get_zones(): # only save this zone id if the private status of the zone matches # the private_zone_in boolean specified in the params private_zone = module.boolean(zone.config.get('PrivateZone', False)) if private_zone == want_private and ((zone.name == zone_name and zone_id == None) or zone.id.replace('/hostedzone/', '') == zone_id): - return zone + if want_vpc_id: + # NOTE: These details aren't available in other boto methods, hence the necessary + # extra API call + zone_details = conn.get_hosted_zone(zone.id)['GetHostedZoneResponse'] + # this is to deal with this boto bug: https://github.com/boto/boto/pull/2882 + if isinstance(zone_details['VPCs'], dict): + if zone_details['VPCs']['VPC']['VPCId'] == want_vpc_id: + return zone + else: # Forward compatibility for when boto fixes that bug + if want_vpc_id in [v['VPCId'] for v in zone_details['VPCs']]: + return zone + else: + return zone return None @@ -295,6 +316,7 @@ def main(): region = dict(required=False), health_check = dict(required=False), failover = dict(required=False), + vpc_id = dict(required=False), ) ) module = AnsibleModule(argument_spec=argument_spec) @@ -318,6 +340,7 @@ def main(): region_in = module.params.get('region') health_check_in = module.params.get('health_check') failover_in = module.params.get('failover') + vpc_id_in = 
module.params.get('vpc_id') region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module) @@ -344,6 +367,11 @@ def main(): elif not alias_hosted_zone_id_in: module.fail_json(msg = "parameter 'alias_hosted_zone_id' required for alias create/delete") + if vpc_id_in and not private_zone_in: + module.fail_json(msg="parameter 'private_zone' must be true when specifying parameter" + " 'vpc_id'") + + # connect to the route53 endpoint try: conn = Route53Connection(**aws_connect_kwargs) @@ -351,7 +379,7 @@ def main(): module.fail_json(msg = e.error_message) # Find the named zone ID - zone = get_zone_by_name(conn, module, zone_in, private_zone_in, hosted_zone_id_in) + zone = get_zone_by_name(conn, module, zone_in, private_zone_in, hosted_zone_id_in, vpc_id_in) # Verify that the requested zone is already defined in Route53 if zone is None: From b106a83d0a4225e20754179b71010a600bd8bc77 Mon Sep 17 00:00:00 2001 From: Chris Church Date: Fri, 10 Jul 2015 16:33:00 -0400 Subject: [PATCH 400/464] Set force=yes as the default, add force parameter to module docs. 
--- windows/win_get_url.ps1 | 2 +- windows/win_get_url.py | 22 ++++++++++++++++++---- 2 files changed, 19 insertions(+), 5 deletions(-) diff --git a/windows/win_get_url.ps1 b/windows/win_get_url.ps1 index 02f19b39360..46979c129f2 100644 --- a/windows/win_get_url.ps1 +++ b/windows/win_get_url.ps1 @@ -40,7 +40,7 @@ Else { Fail-Json $result "missing required argument: dest" } -$force = Get-Attr -obj $params -name "force" "no" | ConvertTo-Bool +$force = Get-Attr -obj $params -name "force" "yes" | ConvertTo-Bool If ($force -or -not (Test-Path $dest)) { $client = New-Object System.Net.WebClient diff --git a/windows/win_get_url.py b/windows/win_get_url.py index 585d3e2aa81..a34f23890b5 100644 --- a/windows/win_get_url.py +++ b/windows/win_get_url.py @@ -27,20 +27,28 @@ module: win_get_url version_added: "1.7" short_description: Fetches a file from a given URL description: - - Fetches a file from a URL and saves to locally + - Fetches a file from a URL and saves to locally options: url: description: - The full URL of a file to download required: true default: null - aliases: [] dest: description: - - The absolute path of the location to save the file at the URL. Be sure to include a filename and extension as appropriate. + - The absolute path of the location to save the file at the URL. Be sure + to include a filename and extension as appropriate. + required: true + default: null + force: + description: + - If C(yes), will always download the file. If C(no), will only + download the file if it does not exist or the remote file has been + modified more recently than the local file. 
+ version_added: "2.0" required: false + choices: [ "yes", "no" ] default: yes - aliases: [] author: "Paul Durivage (@angstwad)" ''' @@ -54,4 +62,10 @@ $ ansible -i hosts -c winrm -m win_get_url -a "url=http://www.example.com/earthr win_get_url: url: 'http://www.example.com/earthrise.jpg' dest: 'C:\Users\RandomUser\earthrise.jpg' + +- name: Download earthrise.jpg to 'C:\Users\RandomUser\earthrise.jpg' only if modified + win_get_url: + url: 'http://www.example.com/earthrise.jpg' + dest: 'C:\Users\RandomUser\earthrise.jpg' + force: no ''' From c0e4c50eebc579a89a8377b0e84864d206c49937 Mon Sep 17 00:00:00 2001 From: Apoorva Kulkarni Date: Thu, 9 Jul 2015 13:38:14 -0700 Subject: [PATCH 401/464] s3 module: Add missing version tag to "encrypt" parameter --- cloud/amazon/s3.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cloud/amazon/s3.py b/cloud/amazon/s3.py index 095befe173a..7b6990e25e3 100644 --- a/cloud/amazon/s3.py +++ b/cloud/amazon/s3.py @@ -50,6 +50,7 @@ options: - When set for PUT mode, asks for server-side encryption required: false default: no + version_added: "2.0" expiration: description: - Time limit (in seconds) for the URL generated and returned by S3/Walrus when performing a mode=put or mode=geturl operation. 
From ab5b5e881973530bea1a48c353e731cb013e7464 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 11 Jul 2015 00:15:02 -0400 Subject: [PATCH 402/464] corrected version added --- database/mysql/mysql_variables.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/database/mysql/mysql_variables.py b/database/mysql/mysql_variables.py index f50ed740539..a2ab0767b55 100644 --- a/database/mysql/mysql_variables.py +++ b/database/mysql/mysql_variables.py @@ -53,7 +53,7 @@ options: - mysql host to connect required: False login_port: - version_added: "1.9" + version_added: "2.0" description: - mysql port to connect required: False From a7f33ee5333eed9f50f32819f113b2a469dc4570 Mon Sep 17 00:00:00 2001 From: Frank van Tol Date: Mon, 13 Jul 2015 16:45:41 +0200 Subject: [PATCH 403/464] Remove default for engine_version Redis and memcached have different engine version numbering, there can not be a shared default value. --- cloud/amazon/elasticache.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/cloud/amazon/elasticache.py b/cloud/amazon/elasticache.py index 3ec0fc2e351..f163ad312e8 100644 --- a/cloud/amazon/elasticache.py +++ b/cloud/amazon/elasticache.py @@ -42,7 +42,6 @@ options: description: - The version number of the cache engine required: false - default: 1.4.14 node_type: description: - The compute and memory capacity of the nodes in the cache cluster @@ -477,7 +476,7 @@ def main(): state={'required': True, 'choices': ['present', 'absent', 'rebooted']}, name={'required': True}, engine={'required': False, 'default': 'memcached'}, - cache_engine_version={'required': False, 'default': '1.4.14'}, + cache_engine_version={'required': False}, node_type={'required': False, 'default': 'cache.m1.small'}, num_nodes={'required': False, 'default': None, 'type': 'int'}, cache_port={'required': False, 'default': 11211, 'type': 'int'}, From a85640c36883ca6083efbfdebd963bfb72f5bad9 Mon Sep 17 00:00:00 2001 From: Frank van Tol Date: Mon, 13 Jul 2015 
16:52:30 +0200 Subject: [PATCH 404/464] Update elasticache.py --- cloud/amazon/elasticache.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cloud/amazon/elasticache.py b/cloud/amazon/elasticache.py index f163ad312e8..6586a018a8d 100644 --- a/cloud/amazon/elasticache.py +++ b/cloud/amazon/elasticache.py @@ -42,6 +42,7 @@ options: description: - The version number of the cache engine required: false + default: none node_type: description: - The compute and memory capacity of the nodes in the cache cluster From 300656ca070bb9f5861535b4ffb030b8192c8502 Mon Sep 17 00:00:00 2001 From: Frank van Tol Date: Mon, 13 Jul 2015 17:21:16 +0200 Subject: [PATCH 405/464] Remove default port value, it does not work in _requires_destroy_and_create logic When creating a Redis cluster, every run it gets destroyed and recreated because the port number of memcached is used as the default. --- cloud/amazon/elasticache.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/amazon/elasticache.py b/cloud/amazon/elasticache.py index 6586a018a8d..31ed4696628 100644 --- a/cloud/amazon/elasticache.py +++ b/cloud/amazon/elasticache.py @@ -56,7 +56,7 @@ options: description: - The port number on which each of the cache nodes will accept connections required: false - default: 11211 + default: none cache_subnet_group: description: - The subnet group name to associate with. Only use if inside a vpc. 
Required if inside a vpc @@ -480,7 +480,7 @@ def main(): cache_engine_version={'required': False}, node_type={'required': False, 'default': 'cache.m1.small'}, num_nodes={'required': False, 'default': None, 'type': 'int'}, - cache_port={'required': False, 'default': 11211, 'type': 'int'}, + cache_port={'required': False, 'type': 'int'}, cache_subnet_group={'required': False, 'default': None}, cache_security_groups={'required': False, 'default': [default], 'type': 'list'}, From 9e11f5fd1534480877f2a7a73339c538962fefa8 Mon Sep 17 00:00:00 2001 From: Mitchell Ludwig Date: Mon, 13 Jul 2015 17:51:32 -0600 Subject: [PATCH 406/464] Improved stat documentation --- files/stat.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/files/stat.py b/files/stat.py index 5f79874d9fd..2e088fc8dbd 100644 --- a/files/stat.py +++ b/files/stat.py @@ -58,6 +58,23 @@ EXAMPLES = ''' - fail: msg="Whoops! file ownership has changed" when: st.stat.pw_name != 'root' +# Determine if a path exists and is a symlink. Note that if the path does +# not exist, and we test sym.stat.islnk, it will fail with an error. So +# therefore, we must test whether it is defined. +# Run this to understand the structure, the skipped ones do not pass the +# check performed by 'when' +- stat: path=/path/to/something + register: sym +- debug: msg="islnk isn't defined (path doesn't exist)" + when: sym.stat.islnk is not defined +- debug: msg="islnk is defined (path must exist)" + when: sym.stat.islnk is defined +- debug: msg="Path exists and is a symlink" + when: sym.stat.islnk is defined and sym.stat.islnk +- debug: msg="Path exists and isn't a symlink" + when: sym.stat.islnk is defined and sym.stat.islnk == False + + # Determine if a path exists and is a directory. Note that we need to test # both that p.stat.isdir actually exists, and also that it's set to true. 
- stat: path=/path/to/something From e916b04e91d02f7fd5d30dccb7b9eee922b3040a Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 14 Jul 2015 10:18:37 -0400 Subject: [PATCH 407/464] Also document in example that unarchive download was added in 2.0 --- files/unarchive.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/unarchive.py b/files/unarchive.py index 3ee83de0dcd..2b373a8e7fb 100644 --- a/files/unarchive.py +++ b/files/unarchive.py @@ -83,7 +83,7 @@ EXAMPLES = ''' # Unarchive a file that is already on the remote machine - unarchive: src=/tmp/foo.zip dest=/usr/local/bin copy=no -# Unarchive a file that needs to be downloaded +# Unarchive a file that needs to be downloaded (added in 2.0) - unarchive: src=https://example.com/example.zip dest=/usr/local/bin copy=no ''' From e6ecca8809e9263170ab6abdd7398e5540dcb58b Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 14 Jul 2015 07:27:09 -0700 Subject: [PATCH 408/464] Minor touch ups of vsphere_guest code. --- cloud/vmware/vsphere_guest.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index b390facda2f..002ef44664e 100644 --- a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -80,7 +80,7 @@ options: - Name of the source template to deploy from default: None snapshot_to_clone: - version_added 2.0 + version_added "2.0" description: - String. When specified, snapshot_to_clone will create a linked clone copy of the VM, Snapshot must already be taken in vCenter. required: false @@ -619,7 +619,7 @@ def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, mo try: if vmTarget: changed = False - elif snapshot_to_clone != None: + elif snapshot_to_clone is not None: #check if snapshot_to_clone is specified, Create a Linked Clone instead of a full clone. 
vmTemplate.clone(guest, resourcepool=rpmor, linked=True, snapshot=snapshot_to_clone) changed = True From 01d4c432b004fae9f6bcd9cef45c4d669879d888 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 14 Jul 2015 11:39:40 -0400 Subject: [PATCH 409/464] Revert "ec2_lc - include all launch config properties in the return" --- cloud/amazon/ec2_lc.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/cloud/amazon/ec2_lc.py b/cloud/amazon/ec2_lc.py index 0721b4e203d..818e8efbb50 100644 --- a/cloud/amazon/ec2_lc.py +++ b/cloud/amazon/ec2_lc.py @@ -241,8 +241,7 @@ def create_launch_config(connection, module): module.exit_json(changed=changed, name=result.name, created_time=str(result.created_time), image_id=result.image_id, arn=result.launch_configuration_arn, - security_groups=result.security_groups, instance_type=result.instance_type, - result=result) + security_groups=result.security_groups, instance_type=instance_type) def delete_launch_config(connection, module): From e1067ef670063b188fa8e8994faa89296f2a72ae Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 14 Jul 2015 12:02:03 -0400 Subject: [PATCH 410/464] Revert "Revert "ec2_lc - include all launch config properties in the return"" --- cloud/amazon/ec2_lc.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_lc.py b/cloud/amazon/ec2_lc.py index 818e8efbb50..0721b4e203d 100644 --- a/cloud/amazon/ec2_lc.py +++ b/cloud/amazon/ec2_lc.py @@ -241,7 +241,8 @@ def create_launch_config(connection, module): module.exit_json(changed=changed, name=result.name, created_time=str(result.created_time), image_id=result.image_id, arn=result.launch_configuration_arn, - security_groups=result.security_groups, instance_type=instance_type) + security_groups=result.security_groups, instance_type=result.instance_type, + result=result) def delete_launch_config(connection, module): From b80ec0a33544dbea868c2548002db6f749401a70 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 14 Jul 
2015 16:54:49 -0400 Subject: [PATCH 411/464] fixed minor doc issues --- cloud/amazon/ec2_asg.py | 2 +- cloud/amazon/route53.py | 6 ++---- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/cloud/amazon/ec2_asg.py b/cloud/amazon/ec2_asg.py index 5cf0282011c..efcd66606b8 100644 --- a/cloud/amazon/ec2_asg.py +++ b/cloud/amazon/ec2_asg.py @@ -111,7 +111,7 @@ options: choices: ['EC2', 'ELB'] default_cooldown: description: - The number of seconds after a scaling activity completes before another can begin. + - The number of seconds after a scaling activity completes before another can begin. required: false default: 300 seconds version_added: "2.0" diff --git a/cloud/amazon/route53.py b/cloud/amazon/route53.py index e3f6c42735a..c659843b9a3 100644 --- a/cloud/amazon/route53.py +++ b/cloud/amazon/route53.py @@ -140,10 +140,8 @@ options: version_added: "2.0" vpc_id: description: - - When used in conjunction with private_zone: true, this will only modify - records in the private hosted zone attached to this VPC. This allows you - to have multiple private hosted zones, all with the same name, attached - to different VPCs. + - "When used in conjunction with private_zone: true, this will only modify records in the private hosted zone attached to this VPC." + - This allows you to have multiple private hosted zones, all with the same name, attached to different VPCs. 
required: false default: null version_added: "2.0" From 291fef3b34ea5510f031816d9c569f54098b8bec Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 14 Jul 2015 17:03:21 -0400 Subject: [PATCH 412/464] fixed version added, reworded description a bit --- cloud/vmware/vsphere_guest.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index 002ef44664e..91f479549d9 100644 --- a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -80,9 +80,9 @@ options: - Name of the source template to deploy from default: None snapshot_to_clone: - version_added "2.0" description: - - String. When specified, snapshot_to_clone will create a linked clone copy of the VM, Snapshot must already be taken in vCenter. + - A string that when specified, will create a linked clone copy of the VM. Snapshot must already be taken in vCenter. + version_added: "2.0" required: false default: none vm_disk: From dd691779a1ab60127c6109015226dca18879e7a0 Mon Sep 17 00:00:00 2001 From: otdw Date: Tue, 14 Jul 2015 15:56:14 -0700 Subject: [PATCH 413/464] removed required together for resource pools, clusters, and template deployments. fixes inability to deploy from template on vsphere clusters without resource pools. 
Also, resource pools and cluster should not be required together as they are independant in vsphere --- cloud/vmware/vsphere_guest.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index 91f479549d9..9ed6ede21c2 100644 --- a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -1256,8 +1256,7 @@ def main(): 'vm_hardware', 'esxi' ], - ['resource_pool', 'cluster'], - ['from_template', 'resource_pool', 'template_src'], + ['from_template', 'template_src'], ], ) From 985cdf2c281c78a9f0861ed8c2f77752b3854812 Mon Sep 17 00:00:00 2001 From: HAMSIK Adam Date: Sat, 6 Jun 2015 00:09:56 +0200 Subject: [PATCH 414/464] Add module parameter for security group name. This make ec2_elb_lb module consitent with others --- cloud/amazon/ec2_elb_lb.py | 28 +++++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_elb_lb.py b/cloud/amazon/ec2_elb_lb.py index 04be9e2813c..504efff10e7 100644 --- a/cloud/amazon/ec2_elb_lb.py +++ b/cloud/amazon/ec2_elb_lb.py @@ -56,6 +56,11 @@ options: require: false default: None version_added: "1.6" + security_group_names: + description: + - A list of security group names to apply to the elb + require: false + default: None health_check: description: - An associative array of health check configuration settings (see example) @@ -361,7 +366,8 @@ class ElbManager(object): if not check_elb: info = { 'name': self.name, - 'status': self.status + 'status': self.status, + 'region': self.region } else: try: @@ -389,6 +395,7 @@ class ElbManager(object): 'out_of_service_count': 0, 'in_service_count': 0, 'unknown_instance_state_count': 0 + 'region': self.region } # status of instances behind the ELB @@ -816,6 +823,7 @@ def main(): zones={'default': None, 'required': False, 'type': 'list'}, purge_zones={'default': False, 'required': False, 'type': 'bool'}, security_group_ids={'default': None, 'required': False, 'type': 
'list'}, + security_group_names={'default': None, 'required': False, 'type': 'list'}, health_check={'default': None, 'required': False, 'type': 'dict'}, subnets={'default': None, 'required': False, 'type': 'list'}, purge_subnets={'default': False, 'required': False, 'type': 'bool'}, @@ -844,6 +852,7 @@ def main(): zones = module.params['zones'] purge_zones = module.params['purge_zones'] security_group_ids = module.params['security_group_ids'] + security_group_names = module.params['security_group_names'] health_check = module.params['health_check'] subnets = module.params['subnets'] purge_subnets = module.params['purge_subnets'] @@ -858,6 +867,23 @@ def main(): if state == 'present' and not (zones or subnets): module.fail_json(msg="At least one availability zone or subnet is required for ELB creation") + if security_group_ids and security_group_names: + module.fail_json(msg = str("Use only one type of parameter (security_group_ids) or (security_group_names)")) + elif security_group_names: + security_group_ids = [] + try: + ec2 = ec2_connect(module) + grp_details = ec2.get_all_security_groups() + + for group_name in security_group_names: + if isinstance(group_name, basestring): + group_name = [group_name] + + group_id = [ str(grp.id) for grp in grp_details if str(grp.name) in group_name ] + security_group_ids.extend(group_id) + except boto.exception.NoAuthHandlerFound, e: + module.fail_json(msg = str(e)) + elb_man = ElbManager(module, name, listeners, purge_listeners, zones, purge_zones, security_group_ids, health_check, subnets, purge_subnets, scheme, From 959c65c7e05665f1c8779307784f350c82e6fa6a Mon Sep 17 00:00:00 2001 From: HAMSIK Adam Date: Sun, 7 Jun 2015 00:04:15 +0200 Subject: [PATCH 415/464] Add version --- cloud/amazon/ec2_elb_lb.py | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/cloud/amazon/ec2_elb_lb.py b/cloud/amazon/ec2_elb_lb.py index 504efff10e7..2b8c76cefc3 100644 --- a/cloud/amazon/ec2_elb_lb.py +++ 
b/cloud/amazon/ec2_elb_lb.py @@ -61,6 +61,7 @@ options: - A list of security group names to apply to the elb require: false default: None + version_added: "2.0" health_check: description: - An associative array of health check configuration settings (see example) @@ -73,7 +74,7 @@ options: aliases: ['aws_region', 'ec2_region'] subnets: description: - - A list of VPC subnets to use when creating ELB. Zones should be empty if using this. + - A list of VPC subnets to use when creating ELB. Zones should be empty if using this. required: false default: None aliases: [] @@ -82,7 +83,7 @@ options: description: - Purge existing subnet on ELB that are not found in subnets required: false - default: false + default: false version_added: "1.7" scheme: description: @@ -152,7 +153,7 @@ EXAMPLES = """ name: "test-vpc" scheme: internal state: present - subnets: + subnets: - subnet-abcd1234 - subnet-1a2b3c4d listeners: @@ -218,7 +219,7 @@ EXAMPLES = """ instance_port: 80 purge_zones: yes -# Creates a ELB and assigns a list of subnets to it. +# Creates a ELB and assigns a list of subnets to it. - local_action: module: ec2_elb_lb state: present @@ -302,10 +303,10 @@ class ElbManager(object): """Handles ELB creation and destruction""" def __init__(self, module, name, listeners=None, purge_listeners=None, - zones=None, purge_zones=None, security_group_ids=None, + zones=None, purge_zones=None, security_group_ids=None, health_check=None, subnets=None, purge_subnets=None, scheme="internet-facing", connection_draining_timeout=None, - cross_az_load_balancing=None, + cross_az_load_balancing=None, stickiness=None, region=None, **aws_connect_params): self.module = module @@ -449,7 +450,7 @@ class ElbManager(object): else: info['cross_az_load_balancing'] = 'no' - # return stickiness info? + # return stickiness info? 
return info @@ -629,7 +630,7 @@ class ElbManager(object): self._attach_subnets(subnets_to_attach) if subnets_to_detach: self._detach_subnets(subnets_to_detach) - + def _set_zones(self): """Determine which zones need to be enabled or disabled on the ELB""" if self.zones: @@ -734,7 +735,7 @@ class ElbManager(object): else: self._create_policy(policy_attrs['param_value'], policy_attrs['method'], policy[0]) self.changed = True - + self._set_listener_policy(listeners_dict, policy) def select_stickiness_policy(self): @@ -801,7 +802,7 @@ class ElbManager(object): else: self._set_listener_policy(listeners_dict) - + def _get_health_check_target(self): """Compose target string from healthcheck parameters""" protocol = self.health_check['ping_protocol'].upper() From 6d6da470c8310b9bc5f846387f4dbb359b2d31b3 Mon Sep 17 00:00:00 2001 From: HAMSIK Adam Date: Thu, 9 Jul 2015 23:42:44 +0200 Subject: [PATCH 416/464] Fix missing , --- cloud/amazon/ec2_elb_lb.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_elb_lb.py b/cloud/amazon/ec2_elb_lb.py index 2b8c76cefc3..ce353527f5a 100644 --- a/cloud/amazon/ec2_elb_lb.py +++ b/cloud/amazon/ec2_elb_lb.py @@ -395,7 +395,7 @@ class ElbManager(object): 'instances': [instance.id for instance in check_elb.instances], 'out_of_service_count': 0, 'in_service_count': 0, - 'unknown_instance_state_count': 0 + 'unknown_instance_state_count': 0, 'region': self.region } From e3d42562830755faa246da172da7e690f0a81792 Mon Sep 17 00:00:00 2001 From: HAMSIK Adam Date: Wed, 15 Jul 2015 10:45:38 +0200 Subject: [PATCH 417/464] Use mutually_exclusive in AnsibleModule --- cloud/amazon/ec2_elb_lb.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/cloud/amazon/ec2_elb_lb.py b/cloud/amazon/ec2_elb_lb.py index ce353527f5a..f2a04863923 100644 --- a/cloud/amazon/ec2_elb_lb.py +++ b/cloud/amazon/ec2_elb_lb.py @@ -837,6 +837,7 @@ def main(): module = AnsibleModule( argument_spec=argument_spec, + mutually_exclusive 
= [['security_group_ids', 'security_group_names']] ) if not HAS_BOTO: @@ -868,9 +869,7 @@ def main(): if state == 'present' and not (zones or subnets): module.fail_json(msg="At least one availability zone or subnet is required for ELB creation") - if security_group_ids and security_group_names: - module.fail_json(msg = str("Use only one type of parameter (security_group_ids) or (security_group_names)")) - elif security_group_names: + if security_group_names: security_group_ids = [] try: ec2 = ec2_connect(module) From 0ca732baafab8b347a322481f1ad296eea9ce929 Mon Sep 17 00:00:00 2001 From: Mischa ter Smitten Date: Wed, 15 Jul 2015 12:00:23 +0200 Subject: [PATCH 418/464] The tilde expansion doesn't work with user.home --- system/user.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/system/user.py b/system/user.py index 7c3fa4c8594..33a3ba24d37 100644 --- a/system/user.py +++ b/system/user.py @@ -271,6 +271,9 @@ class User(object): self.update_password = module.params['update_password'] self.expires = None + if module.params['home'] is not None: + self.home = os.path.expanduser(module.params['home']) + if module.params['expires']: try: self.expires = time.gmtime(module.params['expires']) From 6672205f49907fb65ab5e103c9a20b502a1333e5 Mon Sep 17 00:00:00 2001 From: Lars Kellogg-Stedman Date: Wed, 15 Jul 2015 21:11:01 -0400 Subject: [PATCH 419/464] docker: permit empty or false pid The `docker` Python module only accepts `None` or `'host'` as arguments. This makes it difficult to conditionally set the `pid` attribute using standard Ansible syntax. 
This change converts any value that evaluates as boolean `False` to `None`, which includes empty strings: pid: As well as an explicit `false`: pid: false This permits the following to work as intended: - hosts: localhost tasks: - name: starting container docker: docker_api_version: 1.18 image: larsks/mini-httpd name: web pid: "{{ container_pid|default('') }}" If `container_pid` is set to `host` somewhere, this will create a Docker container with `pid=host`; otherwise, this will create a container with normal isolated pid namespace. --- cloud/docker/docker.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 2bbbbd158a2..71505e258fe 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -1270,6 +1270,10 @@ class DockerManager(object): if params['restart_policy']['Name'] == 'on-failure': params['restart_policy']['MaximumRetryCount'] = optionals['restart_policy_retry'] + # docker_py only accepts 'host' or None + if 'pid' in optionals and not optionals['pid']: + optionals['pid'] = None + if optionals['pid'] is not None: self.ensure_capability('pid') params['pid_mode'] = optionals['pid'] From a9e8cae82e50ee15ee2f1f93dc9ff5d78a85ead1 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 16 Jul 2015 11:36:40 -0400 Subject: [PATCH 420/464] attempt to fix check mode when state='absent' --- cloud/amazon/ec2_key.py | 36 +++++++++++++++++------------------- 1 file changed, 17 insertions(+), 19 deletions(-) diff --git a/cloud/amazon/ec2_key.py b/cloud/amazon/ec2_key.py index a9217bd69db..b59c50034d6 100644 --- a/cloud/amazon/ec2_key.py +++ b/cloud/amazon/ec2_key.py @@ -127,25 +127,23 @@ def main(): if state == 'absent': if key: '''found a match, delete it''' - try: - key.delete() - if wait: - start = time.time() - action_complete = False - while (time.time() - start) < wait_timeout: - if not ec2.get_key_pair(name): - action_complete = True - break - time.sleep(1) - if not action_complete: - 
module.fail_json(msg="timed out while waiting for the key to be removed") - except Exception, e: - module.fail_json(msg="Unable to delete key pair '%s' - %s" % (key, e)) - else: - key = None - changed = True - else: - '''no match found, no changes required''' + if not module.check_mode: + try: + key.delete() + if wait: + start = time.time() + action_complete = False + while (time.time() - start) < wait_timeout: + if not ec2.get_key_pair(name): + action_complete = True + break + time.sleep(1) + if not action_complete: + module.fail_json(msg="timed out while waiting for the key to be removed") + except Exception, e: + module.fail_json(msg="Unable to delete key pair '%s' - %s" % (key, e)) + key = None + changed = True # Ensure requested key is present elif state == 'present': From 444a2ad808d3f794b1e646e391b7352c5373675b Mon Sep 17 00:00:00 2001 From: Lars Kellogg-Stedman Date: Thu, 16 Jul 2015 15:25:39 -0400 Subject: [PATCH 421/464] Do not erroneously mask exceptions There was a catch-all `except` statement in `create_containers`: try: containers = do_create(count, params) except: self.pull_image() containers = do_create(count, params) This would mask a variety of errors that should be exposed, including API compatability errors (as in #1707) and common Python exceptions (KeyError, ValueError, etc) that could result from errors in the code. This change makes the `except` statement more specific, and only attempts to pull the image and start a container if the original create attempt failed due to a 404 error from the docker API. 
--- cloud/docker/docker.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 71505e258fe..131ac59e0ae 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -389,6 +389,7 @@ from urlparse import urlparse try: import docker.client import docker.utils + import docker.errors from requests.exceptions import RequestException except ImportError: HAS_DOCKER_PY = False @@ -1322,7 +1323,10 @@ class DockerManager(object): try: containers = do_create(count, params) - except: + except docker.errors.APIError as e: + if e.response.status_code != 404: + raise + self.pull_image() containers = do_create(count, params) From efb6088c27c84a352df5ad92a60bbd1302017946 Mon Sep 17 00:00:00 2001 From: Maksim Losev Date: Mon, 27 Apr 2015 11:58:20 +0300 Subject: [PATCH 422/464] Use HostConfig object when creating container with Docker Remote API > 1.15 This is mlosev's patch (from #1208), rebased against devel as of 2790af2. It resolves #1707, which was caused by an API incompatibility between the docker module and server API version 1.19. 
--- cloud/docker/docker.py | 62 +++++++++++++++++++++++++++++++++++++----- 1 file changed, 55 insertions(+), 7 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 131ac59e0ae..f2e2b50e9dc 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -530,6 +530,7 @@ class DockerManager(object): 'extra_hosts': ((0, 7, 0), '1.3.1'), 'pid': ((1, 0, 0), '1.17'), 'log_driver': ((1, 2, 0), '1.18'), + 'host_config': ((0, 7, 0), '1.15'), # Clientside only 'insecure_registry': ((0, 5, 0), '0.0') } @@ -739,6 +740,52 @@ class DockerManager(object): else: return None + def get_start_params(self): + """ + Create start params + """ + params = { + 'lxc_conf': self.lxc_conf, + 'binds': self.binds, + 'port_bindings': self.port_bindings, + 'publish_all_ports': self.module.params.get('publish_all_ports'), + 'privileged': self.module.params.get('privileged'), + 'links': self.links, + 'network_mode': self.module.params.get('net'), + } + + optionals = {} + for optional_param in ('dns', 'volumes_from', 'restart_policy', + 'restart_policy_retry', 'pid'): + optionals[optional_param] = self.module.params.get(optional_param) + + if optionals['dns'] is not None: + self.ensure_capability('dns') + params['dns'] = optionals['dns'] + + if optionals['volumes_from'] is not None: + self.ensure_capability('volumes_from') + params['volumes_from'] = optionals['volumes_from'] + + if optionals['restart_policy'] is not None: + self.ensure_capability('restart_policy') + params['restart_policy'] = { 'Name': optionals['restart_policy'] } + if params['restart_policy']['Name'] == 'on-failure': + params['restart_policy']['MaximumRetryCount'] = optionals['restart_policy_retry'] + + if optionals['pid'] is not None: + self.ensure_capability('pid') + params['pid_mode'] = optionals['pid'] + + return params + + def get_host_config(self): + """ + Create HostConfig object + """ + params = self.get_start_params() + return docker.utils.create_host_config(**params) + def 
get_port_bindings(self, ports): """ Parse the `ports` string into a port bindings dict for the `start_container` call. @@ -1292,16 +1339,10 @@ class DockerManager(object): return docker.utils.create_host_config(**params) def create_containers(self, count=1): - try: - mem_limit = _human_to_bytes(self.module.params.get('memory_limit')) - except ValueError as e: - self.module.fail_json(msg=str(e)) - params = {'image': self.module.params.get('image'), 'command': self.module.params.get('command'), 'ports': self.exposed_ports, 'volumes': self.volumes, - 'mem_limit': mem_limit, 'environment': self.env, 'hostname': self.module.params.get('hostname'), 'domainname': self.module.params.get('domainname'), @@ -1309,9 +1350,11 @@ class DockerManager(object): 'name': self.module.params.get('name'), 'stdin_open': self.module.params.get('stdin_open'), 'tty': self.module.params.get('tty'), - 'host_config': self.create_host_config(), } + if self.ensure_capability('host_config', fail=False): + params['host_config'] = self.get_host_config() + def do_create(count, params): results = [] for _ in range(count): @@ -1333,6 +1376,11 @@ class DockerManager(object): return containers def start_containers(self, containers): + params = {} + + if not self.ensure_capability('host_config', fail=False): + params = self.get_start_params() + for i in containers: self.client.start(i) self.increment_counter('started') From 963eb242f166ced5fd7904c31a72fc9feb1b0613 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 16 Jul 2015 16:52:56 -0400 Subject: [PATCH 423/464] updated to add missing 'use' option --- packaging/os/package.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/packaging/os/package.py b/packaging/os/package.py index 7c94b98a941..288ca83a772 100644 --- a/packaging/os/package.py +++ b/packaging/os/package.py @@ -23,7 +23,10 @@ DOCUMENTATION = ''' --- module: package version_added: 2.0 -author: Ansible Core Team +author: + - Ansible Inc +maintainers: + - Ansible 
Core Team short_description: Generic OS package manager description: - Installs, upgrade and removes packages using the underlying OS package manager. @@ -36,6 +39,11 @@ options: description: - Whether to install (C(present), C(latest)), or remove (C(absent)) a package. required: true + use: + description: + - The required package manager module to use (yum, apt, etc). The default 'auto' will use existing facts or try to autodetect it. + required: false + default: auto requirements: - Whatever is required for the package plugins specific for each system. notes: From 6aac888c7da8df9d524f8c26f7594056e490aeb1 Mon Sep 17 00:00:00 2001 From: David Shrewsbury Date: Thu, 16 Jul 2015 15:04:55 -0400 Subject: [PATCH 424/464] Add new os_nova_flavor module. The os_nova_flavor module allows a user with administrative privileges to create and delete nova flavors. --- cloud/openstack/os_nova_flavor.py | 237 ++++++++++++++++++++++++++++++ 1 file changed, 237 insertions(+) create mode 100644 cloud/openstack/os_nova_flavor.py diff --git a/cloud/openstack/os_nova_flavor.py b/cloud/openstack/os_nova_flavor.py new file mode 100644 index 00000000000..82b3a53aa3d --- /dev/null +++ b/cloud/openstack/os_nova_flavor.py @@ -0,0 +1,237 @@ +#!/usr/bin/python + +# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . 
+ +try: + import shade + HAS_SHADE = True +except ImportError: + HAS_SHADE = False + +DOCUMENTATION = ''' +--- +module: os_nova_flavor +short_description: Manage OpenStack compute flavors +extends_documentation_fragment: openstack +version_added: "2.0" +author: "David Shrewsbury (@Shrews)" +description: + - Add or remove flavors from OpenStack. +options: + state: + description: + - Indicate desired state of the resource. When I(state) is 'present', + then I(ram), I(vcpus), and I(disk) are all required. There are no + default values for those parameters. + choices: ['present', 'absent'] + required: false + default: present + name: + description: + - Flavor name. + required: true + ram: + description: + - Amount of memory, in MB. + required: false + default: null + vcpus: + description: + - Number of virtual CPUs. + required: false + default: null + disk: + description: + - Size of local disk, in GB. + required: false + default: null + ephemeral: + description: + - Ephemeral space size, in GB. + required: false + default: 0 + swap: + description: + - Swap space size, in MB. + required: false + default: 0 + rxtx_factor: + description: + - RX/TX factor. + required: false + default: 1.0 + is_public: + description: + - Make flavor accessible to the public. + required: false + default: true + flavorid: + description: + - ID for the flavor. This is optional as a unique UUID will be + assigned if a value is not specified. + required: false + default: "auto" +requirements: ["shade"] +''' + +EXAMPLES = ''' +# Create 'tiny' flavor with 1024MB of RAM, 1 virtual CPU, and 10GB of +# local disk, and 10GB of ephemeral. +- os_nova_flavor: + cloud=mycloud + state=present + name=tiny + ram=1024 + vcpus=1 + disk=10 + ephemeral=10 + +# Delete 'tiny' flavor +- os_nova_flavor: + cloud=mycloud + state=absent + name=tiny +''' + +RETURN = ''' +flavor: + description: Dictionary describing the flavor. 
+ returned: On success when I(state) is 'present' + type: dictionary + contains: + id: + description: Flavor ID. + returned: success + type: string + sample: "515256b8-7027-4d73-aa54-4e30a4a4a339" + name: + description: Flavor name. + returned: success + type: string + sample: "tiny" + disk: + description: Size of local disk, in GB. + returned: success + type: int + sample: 10 + ephemeral: + description: Ephemeral space size, in GB. + returned: success + type: int + sample: 10 + ram: + description: Amount of memory, in MB. + returned: success + type: int + sample: 1024 + swap: + description: Swap space size, in MB. + returned: success + type: int + sample: 100 + vcpus: + description: Number of virtual CPUs. + returned: success + type: int + sample: 2 + is_public: + description: Make flavor accessible to the public. + returned: success + type: bool + sample: true +''' + + +def _system_state_change(module, flavor): + state = module.params['state'] + if state == 'present' and not flavor: + return True + if state == 'absent' and flavor: + return True + return False + + +def main(): + argument_spec = openstack_full_argument_spec( + state = dict(required=False, default='present', + choices=['absent', 'present']), + name = dict(required=False), + + # required when state is 'present' + ram = dict(required=False, type='int'), + vcpus = dict(required=False, type='int'), + disk = dict(required=False, type='int'), + + ephemeral = dict(required=False, default=0, type='int'), + swap = dict(required=False, default=0, type='int'), + rxtx_factor = dict(required=False, default=1.0, type='float'), + is_public = dict(required=False, default=True, type='bool'), + flavorid = dict(required=False, default="auto"), + ) + + module_kwargs = openstack_module_kwargs() + module = AnsibleModule( + argument_spec, + supports_check_mode=True, + required_if=[ + ('state', 'present', ['ram', 'vcpus', 'disk']) + ], + **module_kwargs) + + if not HAS_SHADE: + module.fail_json(msg='shade is required for 
this module') + + state = module.params['state'] + name = module.params['name'] + + try: + cloud = shade.operator_cloud(**module.params) + flavor = cloud.get_flavor(name) + + if module.check_mode: + module.exit_json(changed=_system_state_change(module, flavor)) + + if state == 'present': + if not flavor: + flavor = cloud.create_flavor( + name=name, + ram=module.params['ram'], + vcpus=module.params['vcpus'], + disk=module.params['disk'], + flavorid=module.params['flavorid'], + ephemeral=module.params['ephemeral'], + swap=module.params['swap'], + rxtx_factor=module.params['rxtx_factor'], + is_public=module.params['is_public'] + ) + module.exit_json(changed=True, flavor=flavor) + module.exit_json(changed=False, flavor=flavor) + + elif state == 'absent': + if flavor: + cloud.delete_flavor(name) + module.exit_json(changed=True) + module.exit_json(changed=False) + + except shade.OpenStackCloudException as e: + module.fail_json(msg=e.message) + + +# this is magic, see lib/ansible/module_common.py +from ansible.module_utils.basic import * +from ansible.module_utils.openstack import * +if __name__ == '__main__': + main() From 24c2bccd6607edd7ee67e5cb83b8c1749b58a3bc Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 17 Jul 2015 01:11:04 -0400 Subject: [PATCH 425/464] corrected version_added --- cloud/amazon/cloudformation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/cloudformation.py b/cloud/amazon/cloudformation.py index abde0ec375c..f95fbba00e2 100644 --- a/cloud/amazon/cloudformation.py +++ b/cloud/amazon/cloudformation.py @@ -67,7 +67,7 @@ options: required: false default: null aliases: [] - version_added: "x.x" + version_added: "1.9" tags: description: - Dictionary of tags to associate with stack and it's resources during stack creation. Cannot be updated later. 
From dc71c04827dd7729e31e931bf32612e6fbc9288a Mon Sep 17 00:00:00 2001 From: whiter Date: Fri, 17 Jul 2015 15:54:17 +1000 Subject: [PATCH 426/464] Added 'resource_tags' alias --- cloud/amazon/ec2_vpc_net.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_vpc_net.py b/cloud/amazon/ec2_vpc_net.py index ebdd4ed6504..2ee730f59cb 100644 --- a/cloud/amazon/ec2_vpc_net.py +++ b/cloud/amazon/ec2_vpc_net.py @@ -59,6 +59,7 @@ options: - The tags you want attached to the VPC. This is independent of the name value, note if you pass a 'Name' key it would override the Name of the VPC if it's different. default: None required: false + aliases: [ 'resource_tags' ] state: description: - The state of the VPC. Either absent or present. @@ -186,7 +187,7 @@ def main(): dns_support = dict(type='bool', default=True), dns_hostnames = dict(type='bool', default=True), dhcp_opts_id = dict(type='str', default=None, required=False), - tags = dict(type='dict', required=False, default=None), + tags = dict(type='dict', required=False, default=None, aliases=['resource_tags']), state = dict(choices=['present', 'absent'], default='present'), multi_ok = dict(type='bool', default=False) ) From 3533f3953438be8e3b860a2cf514dbd192c26552 Mon Sep 17 00:00:00 2001 From: Eero Niemi Date: Fri, 17 Jul 2015 16:54:39 +0300 Subject: [PATCH 427/464] Fixed parameter validation when creating a volume from a snapshot --- cloud/amazon/ec2_vol.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/cloud/amazon/ec2_vol.py b/cloud/amazon/ec2_vol.py index 712be248af3..0d275cc91d7 100644 --- a/cloud/amazon/ec2_vol.py +++ b/cloud/amazon/ec2_vol.py @@ -436,11 +436,11 @@ def main(): # Delaying the checks until after the instance check allows us to get volume ids for existing volumes # without needing to pass an unused volume_size - if not volume_size and not (id or name): - module.fail_json(msg="You must specify an existing volume with id or name or a volume_size") + 
if not volume_size and not (id or name or snapshot): + module.fail_json(msg="You must specify volume_size or identify an existing volume by id, name, or snapshot") - if volume_size and id: - module.fail_json(msg="Cannot specify volume_size and id") + if volume_size and (id or snapshot): + module.fail_json(msg="Cannot specify volume_size together with id or snapshot") if state == 'absent': delete_volume(module, ec2) From 048cfb857dd7a8c4a55d373e172c2fb47eea2135 Mon Sep 17 00:00:00 2001 From: Herby Gillot Date: Fri, 17 Jul 2015 21:09:34 -0400 Subject: [PATCH 428/464] rds: add the ability to reboot RDS instances --- cloud/amazon/rds.py | 60 +++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 58 insertions(+), 2 deletions(-) diff --git a/cloud/amazon/rds.py b/cloud/amazon/rds.py index 3d6f192b9ab..4bfb7e666b0 100644 --- a/cloud/amazon/rds.py +++ b/cloud/amazon/rds.py @@ -28,7 +28,7 @@ options: required: true default: null aliases: [] - choices: [ 'create', 'replicate', 'delete', 'facts', 'modify' , 'promote', 'snapshot', 'restore' ] + choices: [ 'create', 'replicate', 'delete', 'facts', 'modify' , 'promote', 'snapshot', 'reboot', 'restore' ] instance_name: description: - Database instance identifier. Required except when using command=facts or command=delete on just a snapshot @@ -213,6 +213,13 @@ options: default: no choices: [ "yes", "no" ] aliases: [] + force_failover: + description: + - Used only when command=reboot. If enabled, the reboot is done using a MultiAZ failover. + required: false + default: "no" + choices: [ "yes", "no" ] + aliases: [] new_instance_name: description: - Name to rename an instance to. Used only when command=modify. 
@@ -292,6 +299,13 @@ EXAMPLES = ''' instance_name: new-database new_instance_name: renamed-database wait: yes + +# Reboot an instance and wait for it to become available again +- rds + command: reboot + instance_name: database + wait: yes + ''' import sys @@ -380,6 +394,13 @@ class RDSConnection: except boto.exception.BotoServerError, e: raise RDSException(e) + def reboot_db_instance(self, instance_name, **params): + try: + result = self.connection.reboot_dbinstance(instance_name) + return RDSDBInstance(result) + except boto.exception.BotoServerError, e: + raise RDSException(e) + def restore_db_instance_from_db_snapshot(self, instance_name, snapshot, instance_type, **params): try: result = self.connection.restore_dbinstance_from_dbsnapshot(snapshot, instance_name, instance_type, **params) @@ -464,6 +485,13 @@ class RDS2Connection: except boto.exception.BotoServerError, e: raise RDSException(e) + def reboot_db_instance(self, instance_name, **params): + try: + result = self.connection.reboot_db_instance(instance_name, **params)['RebootDBInstanceResponse']['RebootDBInstanceResult']['DBInstance'] + return RDS2DBInstance(result) + except boto.exception.BotoServerError, e: + raise RDSException(e) + def restore_db_instance_from_db_snapshot(self, instance_name, snapshot, instance_type, **params): try: result = self.connection.restore_db_instance_from_db_snapshot(instance_name, snapshot, **params)['RestoreDBInstanceFromDBSnapshotResponse']['RestoreDBInstanceFromDBSnapshotResult']['DBInstance'] @@ -847,6 +875,31 @@ def snapshot_db_instance(module, conn): module.exit_json(changed=changed, snapshot=resource.get_data()) +def reboot_db_instance(module, conn): + required_vars = ['instance_name'] + valid_vars = [] + + if has_rds2: + valid_vars.append('force_failover') + + params = validate_parameters(required_vars, valid_vars, module) + instance_name = module.params.get('instance_name') + result = conn.get_db_instance(instance_name) + changed = False + try: + result = 
conn.reboot_db_instance(instance_name, **params) + changed = True + except RDSException, e: + module.fail_json(msg=e.message) + + if module.params.get('wait'): + resource = await_resource(conn, result, 'available', module) + else: + resource = conn.get_db_instance(instance_name) + + module.exit_json(changed=changed, instance=resource.get_data()) + + def restore_db_instance(module, conn): required_vars = ['instance_name', 'snapshot'] valid_vars = ['db_name', 'iops', 'license_model', 'multi_zone', @@ -918,6 +971,7 @@ def validate_parameters(required_vars, valid_vars, module): 'instance_type': 'db_instance_class', 'password': 'master_user_password', 'new_instance_name': 'new_db_instance_identifier', + 'force_failover': 'force_failover', } if has_rds2: optional_params.update(optional_params_rds2) @@ -960,7 +1014,7 @@ def validate_parameters(required_vars, valid_vars, module): def main(): argument_spec = ec2_argument_spec() argument_spec.update(dict( - command = dict(choices=['create', 'replicate', 'delete', 'facts', 'modify', 'promote', 'snapshot', 'restore'], required=True), + command = dict(choices=['create', 'replicate', 'delete', 'facts', 'modify', 'promote', 'snapshot', 'reboot', 'restore'], required=True), instance_name = dict(required=False), source_instance = dict(required=False), db_engine = dict(choices=['MySQL', 'oracle-se1', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres'], required=False), @@ -992,6 +1046,7 @@ def main(): tags = dict(type='dict', required=False), publicly_accessible = dict(required=False), character_set_name = dict(required=False), + force_failover = dict(type='bool', required=False, default=False) ) ) @@ -1010,6 +1065,7 @@ def main(): 'modify': modify_db_instance, 'promote': promote_db_instance, 'snapshot': snapshot_db_instance, + 'reboot': reboot_db_instance, 'restore': restore_db_instance, } From db199991856053c8ee3e1a201aed25d27119e0d0 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: 
Fri, 17 Jul 2015 23:13:21 -0400 Subject: [PATCH 429/464] minor doc fixes, version added for latest feature --- cloud/amazon/rds.py | 54 +++++++++------------------------------------ 1 file changed, 10 insertions(+), 44 deletions(-) diff --git a/cloud/amazon/rds.py b/cloud/amazon/rds.py index 4bfb7e666b0..9e98f50230b 100644 --- a/cloud/amazon/rds.py +++ b/cloud/amazon/rds.py @@ -24,147 +24,123 @@ description: options: command: description: - - Specifies the action to take. + - Specifies the action to take. required: true - default: null - aliases: [] choices: [ 'create', 'replicate', 'delete', 'facts', 'modify' , 'promote', 'snapshot', 'reboot', 'restore' ] instance_name: description: - Database instance identifier. Required except when using command=facts or command=delete on just a snapshot required: false default: null - aliases: [] source_instance: description: - Name of the database to replicate. Used only when command=replicate. required: false default: null - aliases: [] db_engine: description: - - The type of database. Used only when command=create. + - The type of database. Used only when command=create. required: false default: null - aliases: [] choices: [ 'MySQL', 'oracle-se1', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres'] size: description: - Size in gigabytes of the initial storage for the DB instance. Used only when command=create or command=modify. required: false default: null - aliases: [] instance_type: description: - - The instance type of the database. Must be specified when command=create. Optional when command=replicate, command=modify or command=restore. If not specified then the replica inherits the same instance type as the source instance. + - The instance type of the database. Must be specified when command=create. Optional when command=replicate, command=modify or command=restore. If not specified then the replica inherits the same instance type as the source instance. 
required: false default: null - aliases: [] username: description: - Master database username. Used only when command=create. required: false default: null - aliases: [] password: description: - Password for the master database username. Used only when command=create or command=modify. required: false default: null - aliases: [] region: description: - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used. required: true - default: null aliases: [ 'aws_region', 'ec2_region' ] db_name: description: - Name of a database to create within the instance. If not specified then no database is created. Used only when command=create. required: false default: null - aliases: [] engine_version: description: - Version number of the database engine to use. Used only when command=create. If not specified then the current Amazon RDS default engine version is used. required: false default: null - aliases: [] parameter_group: description: - Name of the DB parameter group to associate with this instance. If omitted then the RDS default DBParameterGroup will be used. Used only when command=create or command=modify. required: false default: null - aliases: [] license_model: description: - - The license model for this DB instance. Used only when command=create or command=restore. + - The license model for this DB instance. Used only when command=create or command=restore. required: false default: null - aliases: [] choices: [ 'license-included', 'bring-your-own-license', 'general-public-license', 'postgresql-license' ] multi_zone: description: - Specifies if this is a Multi-availability-zone deployment. Can not be used in conjunction with zone parameter. Used only when command=create or command=modify. - choices: [ "yes", "no" ] + choices: [ "yes", "no" ] required: false default: null - aliases: [] iops: description: - Specifies the number of IOPS for the instance. Used only when command=create or command=modify. 
Must be an integer greater than 1000. required: false default: null - aliases: [] security_groups: description: - Comma separated list of one or more security groups. Used only when command=create or command=modify. required: false default: null - aliases: [] vpc_security_groups: description: - Comma separated list of one or more vpc security group ids. Also requires `subnet` to be specified. Used only when command=create or command=modify. required: false default: null - aliases: [] port: description: - Port number that the DB instance uses for connections. Defaults to 3306 for mysql. Must be changed to 1521 for Oracle, 1433 for SQL Server, 5432 for PostgreSQL. Used only when command=create or command=replicate. required: false default: null - aliases: [] upgrade: description: - Indicates that minor version upgrades should be applied automatically. Used only when command=create or command=replicate. required: false default: no choices: [ "yes", "no" ] - aliases: [] option_group: description: - The name of the option group to use. If not specified then the default option group is used. Used only when command=create. required: false default: null - aliases: [] maint_window: description: - "Maintenance window in format of ddd:hh24:mi-ddd:hh24:mi. (Example: Mon:22:00-Mon:23:15) If not specified then a random maintenance window is assigned. Used only when command=create or command=modify." required: false default: null - aliases: [] backup_window: description: - Backup window in format of hh24:mi-hh24:mi. If not specified then a random backup window is assigned. Used only when command=create or command=modify. required: false default: null - aliases: [] backup_retention: description: - "Number of days backups are retained. Set to 0 to disable backups. Default is 1 day. Valid range: 0-35. Used only when command=create or command=modify." required: false default: null - aliases: [] zone: description: - availability zone in which to launch the instance. 
Used only when command=create, command=replicate or command=restore. @@ -176,18 +152,15 @@ options: - VPC subnet group. If specified then a VPC instance is created. Used only when command=create. required: false default: null - aliases: [] snapshot: description: - Name of snapshot to take. When command=delete, if no snapshot name is provided then no snapshot is taken. If used with command=delete with no instance_name, the snapshot is deleted. Used with command=facts, command=delete or command=snapshot. required: false default: null - aliases: [] aws_secret_key: description: - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used. required: false - default: null aliases: [ 'ec2_secret_key', 'secret_key' ] aws_access_key: description: @@ -201,53 +174,46 @@ options: required: false default: "no" choices: [ "yes", "no" ] - aliases: [] wait_timeout: description: - how long before wait gives up, in seconds default: 300 - aliases: [] apply_immediately: description: - Used only when command=modify. If enabled, the modifications will be applied as soon as possible rather than waiting for the next preferred maintenance window. default: no choices: [ "yes", "no" ] - aliases: [] force_failover: description: - Used only when command=reboot. If enabled, the reboot is done using a MultiAZ failover. required: false default: "no" choices: [ "yes", "no" ] - aliases: [] + version_added: "2.0" new_instance_name: description: - Name to rename an instance to. Used only when command=modify. required: false default: null - aliases: [] - version_added: 1.5 + version_added: "1.5" character_set_name: description: - Associate the DB instance with a specified character set. Used with command=create. required: false default: null - aliases: [] - version_added: 1.9 + version_added: "1.9" publicly_accessible: description: - explicitly set whether the resource should be publicly accessible or not. Used with command=create, command=replicate. 
Requires boto >= 2.26.0 required: false default: null - aliases: [] - version_added: 1.9 + version_added: "1.9" tags: description: - tags dict to apply to a resource. Used with command=create, command=replicate, command=restore. Requires boto >= 2.26.0 required: false default: null - aliases: [] - version_added: 1.9 + version_added: "1.9" requirements: - "python >= 2.6" - "boto" From e04f75d8721d67175d00259b364ee8596222ef4d Mon Sep 17 00:00:00 2001 From: bambou Date: Sat, 18 Jul 2015 17:53:02 +0200 Subject: [PATCH 430/464] Check if the gid is set --- system/group.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/system/group.py b/system/group.py index d97dd2176ac..ab542d9bc47 100644 --- a/system/group.py +++ b/system/group.py @@ -273,7 +273,8 @@ class DarwinGroup(Group): def group_add(self, **kwargs): cmd = [self.module.get_bin_path('dseditgroup', True)] cmd += [ '-o', 'create' ] - cmd += [ '-i', self.gid ] + if self.gid is not None: + cmd += [ '-i', self.gid ] cmd += [ '-L', self.name ] (rc, out, err) = self.execute_command(cmd) return (rc, out, err) @@ -285,12 +286,13 @@ class DarwinGroup(Group): (rc, out, err) = self.execute_command(cmd) return (rc, out, err) - def group_mod(self): + def group_mod(self, gid=None): info = self.group_info() if self.gid is not None and int(self.gid) != info[2]: cmd = [self.module.get_bin_path('dseditgroup', True)] cmd += [ '-o', 'edit' ] - cmd += [ '-i', self.gid ] + if gid is not None: + cmd += [ '-i', gid ] cmd += [ '-L', self.name ] (rc, out, err) = self.execute_command(cmd) return (rc, out, err) From 718fd1f891fed028ac8d917817cafaf7817abf1f Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 18 Jul 2015 23:28:41 -0400 Subject: [PATCH 431/464] prevent usless assignment of home --- system/user.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/user.py b/system/user.py index 33a3ba24d37..1045df70e67 100644 --- a/system/user.py +++ b/system/user.py @@ -253,7 +253,6 @@ class 
User(object): self.group = module.params['group'] self.groups = module.params['groups'] self.comment = module.params['comment'] - self.home = module.params['home'] self.shell = module.params['shell'] self.password = module.params['password'] self.force = module.params['force'] @@ -269,6 +268,7 @@ class User(object): self.ssh_comment = module.params['ssh_key_comment'] self.ssh_passphrase = module.params['ssh_key_passphrase'] self.update_password = module.params['update_password'] + self.home = None self.expires = None if module.params['home'] is not None: From 3849a6d87b214838c5906d9202eb9c48c75fdae5 Mon Sep 17 00:00:00 2001 From: Patrik Lundin Date: Sun, 19 Jul 2015 12:45:31 +0200 Subject: [PATCH 432/464] synchronize: add flag for verifying target host. Add the possibility to verify the target host using a "verify_host" flag. It is disabled by default to not change the module behaviour. --- files/synchronize.py | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/files/synchronize.py b/files/synchronize.py index abad5ad359f..ff58f9c1032 100644 --- a/files/synchronize.py +++ b/files/synchronize.py @@ -158,6 +158,12 @@ options: default: no required: false version_added: "2.0" + verify_host: + description: + - Verify destination host key. + default: no + required: false + version_added: "2.0" notes: - rsync must be installed on both the local and remote machine. 
- Inspect the verbose output to validate the destination user/host/path @@ -244,6 +250,7 @@ def main(): rsync_opts = dict(type='list'), ssh_args = dict(type='str'), partial = dict(default='no', type='bool'), + verify_host = dict(default='no', type='bool'), ), supports_check_mode = True ) @@ -272,6 +279,7 @@ def main(): group = module.params['group'] rsync_opts = module.params['rsync_opts'] ssh_args = module.params['ssh_args'] + verify_host = module.params['verify_host'] cmd = '%s --delay-updates -F' % rsync if compress: @@ -324,10 +332,13 @@ def main(): else: private_key = '-i '+ private_key + ssh_opts = '-S none' + + if not verify_host: + ssh_opts = '%s -o StrictHostKeyChecking=no' % ssh_opts + if ssh_args: - ssh_opts = '-S none -o StrictHostKeyChecking=no %s' % ssh_args - else: - ssh_opts = '-S none -o StrictHostKeyChecking=no' + ssh_opts = '%s %s' % (ssh_opts, ssh_args) if dest_port != 22: cmd += " --rsh 'ssh %s %s -o Port=%s'" % (private_key, ssh_opts, dest_port) From ca80b92233c5eed5f08663d16d9c4bd7600c8e48 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sun, 19 Jul 2015 17:42:12 -0400 Subject: [PATCH 433/464] added version_Added to get_url's force_basic_auth --- network/basics/get_url.py | 1 + 1 file changed, 1 insertion(+) diff --git a/network/basics/get_url.py b/network/basics/get_url.py index 9ab039ebb4b..66fc71b78da 100644 --- a/network/basics/get_url.py +++ b/network/basics/get_url.py @@ -111,6 +111,7 @@ options: required: false version_added: '1.6' force_basic_auth: + version_added: '2.0' description: - httplib2, the library used by the uri module only sends authentication information when a webservice responds to an initial request with a 401 status. Since some basic auth services do not properly From fca75a9705e8c2d698d65f86337cdfc4ea996521 Mon Sep 17 00:00:00 2001 From: Daniel Kerwin Date: Mon, 24 Nov 2014 22:55:14 +0100 Subject: [PATCH 434/464] Add support for cpusets. 
Requires docker-py >= 0.6.0 --- cloud/docker/docker.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 5e78ec98969..bde2f7dafe5 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -273,8 +273,14 @@ options: docker-py >= 0.5.0. default: false version_added: "1.9" + cpuset: + description: + - CPUs in which to allow execution. Requires docker-py >= 0.6.0. + required: false + default: null + version_added: "1.8" -author: +author: - "Cove Schneider (@cove)" - "Joshua Conner (@joshuaconner)" - "Pavel Antonov (@softzilla)" @@ -1583,6 +1589,7 @@ def main(): pid = dict(default=None), insecure_registry = dict(default=False, type='bool'), log_driver = dict(default=None, choices=['json-file', 'none', 'syslog']), + cpuset = dict(default=None), ), required_together = ( ['tls_client_cert', 'tls_client_key'], From a8bc50a11f5801fa71ccb695867d9944ea294db5 Mon Sep 17 00:00:00 2001 From: Daniel Kerwin Date: Mon, 24 Nov 2014 23:08:12 +0100 Subject: [PATCH 435/464] Renamed to cpu_set --- cloud/docker/docker.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index bde2f7dafe5..b30046ae157 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -273,7 +273,7 @@ options: docker-py >= 0.5.0. default: false version_added: "1.9" - cpuset: + cpu_set: description: - CPUs in which to allow execution. Requires docker-py >= 0.6.0. 
required: false @@ -1589,7 +1589,7 @@ def main(): pid = dict(default=None), insecure_registry = dict(default=False, type='bool'), log_driver = dict(default=None, choices=['json-file', 'none', 'syslog']), - cpuset = dict(default=None), + cpu_set = dict(default=None), ), required_together = ( ['tls_client_cert', 'tls_client_key'], From a702dbd29ac266837da9a0a25d2c6b51d3a87c44 Mon Sep 17 00:00:00 2001 From: Daniel Kerwin Date: Mon, 29 Jun 2015 22:15:25 +0200 Subject: [PATCH 436/464] Switch to _cap_ver_req and add cpu_set to create_containers --- cloud/docker/docker.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index b30046ae157..fcd2a3453d0 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -537,6 +537,7 @@ class DockerManager(object): 'pid': ((1, 0, 0), '1.17'), 'log_driver': ((1, 2, 0), '1.18'), 'host_config': ((0, 7, 0), '1.15'), + 'cpu_set': ((0, 6, 0), '1.14'), # Clientside only 'insecure_registry': ((0, 5, 0), '0.0') } @@ -559,7 +560,7 @@ class DockerManager(object): elif 2 <= len(parts) <= 3: # default to read-write ro = False - # with supplied bind mode + # with supplied bind mode if len(parts) == 3: if parts[2] not in ['ro', 'rw']: self.module.fail_json(msg='bind mode needs to either be "ro" or "rw"') @@ -1356,6 +1357,8 @@ class DockerManager(object): 'name': self.module.params.get('name'), 'stdin_open': self.module.params.get('stdin_open'), 'tty': self.module.params.get('tty'), + 'cpuset': self.module.params.get('cpu_set'), + 'host_config': self.create_host_config(), } if self.ensure_capability('host_config', fail=False): From d4d78a1998cc83e2aa232bb7922fc7c8c514e0e7 Mon Sep 17 00:00:00 2001 From: Daniel Kerwin Date: Tue, 30 Jun 2015 17:48:53 +0200 Subject: [PATCH 437/464] Too late for 1.8 --- cloud/docker/docker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index fcd2a3453d0..62c637a5e04 100644 
--- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -278,7 +278,7 @@ options: - CPUs in which to allow execution. Requires docker-py >= 0.6.0. required: false default: null - version_added: "1.8" + version_added: "1.9" author: - "Cove Schneider (@cove)" From 01f8a99509c0715579c6085c518582d3fef6a941 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 20 Jul 2015 13:38:37 -0700 Subject: [PATCH 438/464] Deprecated _ec2_ami_search now verifies SSL certificates --- cloud/amazon/_ec2_ami_search.py | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/cloud/amazon/_ec2_ami_search.py b/cloud/amazon/_ec2_ami_search.py index 65953af2b5d..4fac97e9471 100644 --- a/cloud/amazon/_ec2_ami_search.py +++ b/cloud/amazon/_ec2_ami_search.py @@ -65,6 +65,15 @@ options: required: false default: paravirtual choices: ["paravirtual", "hvm"] + validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only + be set to C(no) used on personally controlled sites using self-signed + certificates. Prior to 1.9.3 the code defaulted to C(no). 
+ required: false + default: 'yes' + choices: ['yes', 'no'] + version_added: '1.9.3' author: Lorin Hochstein ''' @@ -102,11 +111,12 @@ AWS_REGIONS = ['ap-northeast-1', def get_url(module, url): """ Get url and return response """ - try: - r = urllib2.urlopen(url) - except (urllib2.HTTPError, urllib2.URLError), e: - code = getattr(e, 'code', -1) - module.fail_json(msg="Request failed: %s" % str(e), status_code=code) + + r, info = fetch_url(module, url) + if info['status'] != 200: + # Backwards compat + info['status_code'] = info['status'] + module.fail_json(**info) return r @@ -182,7 +192,8 @@ def main(): choices=['i386', 'amd64']), region=dict(required=False, default='us-east-1', choices=AWS_REGIONS), virt=dict(required=False, default='paravirtual', - choices=['paravirtual', 'hvm']) + choices=['paravirtual', 'hvm']), + validate_certs = dict(required=False, default=True, type='bool'), ) module = AnsibleModule(argument_spec=arg_spec) distro = module.params['distro'] @@ -196,6 +207,7 @@ def main(): # this is magic, see lib/ansible/module_common.py from ansible.module_utils.basic import * +from ansible.module_utils.urls import * if __name__ == '__main__': main() From 3f7313cc431229cde0e250b8c5f4f1b592d223a4 Mon Sep 17 00:00:00 2001 From: ayush Date: Mon, 20 Jul 2015 17:05:10 -0700 Subject: [PATCH 439/464] Updated doc strings so each character isn't considered a line --- cloud/amazon/s3.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/cloud/amazon/s3.py b/cloud/amazon/s3.py index 7b6990e25e3..072c8bc40d4 100644 --- a/cloud/amazon/s3.py +++ b/cloud/amazon/s3.py @@ -35,7 +35,8 @@ options: default: null aliases: ['ec2_secret_key', 'secret_key'] bucket: - description: Bucket name. + description: + - Bucket name. required: true default: null aliases: [] @@ -118,11 +119,13 @@ options: default: 0 version_added: "2.0" s3_url: - description: S3 URL endpoint for usage with Eucalypus, fakes3, etc. 
Otherwise assumes AWS + description: + - S3 URL endpoint for usage with Eucalypus, fakes3, etc. Otherwise assumes AWS default: null aliases: [ S3_URL ] src: - description: The source file path when performing a PUT operation. + description: + - The source file path when performing a PUT operation. required: false default: null aliases: [] From 24419284840cd7022a3083219c67570be07dc67e Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 20 Jul 2015 20:09:36 -0400 Subject: [PATCH 440/464] corrected v ersion added --- cloud/docker/docker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 62c637a5e04..1c4e6e8cd4e 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -278,7 +278,7 @@ options: - CPUs in which to allow execution. Requires docker-py >= 0.6.0. required: false default: null - version_added: "1.9" + version_added: "2.0" author: - "Cove Schneider (@cove)" From 1dafa427c3d0f299da2382c5b4a92d52805a5faa Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 20 Jul 2015 20:52:43 -0400 Subject: [PATCH 441/464] added versionadded to new option in pip module --- packaging/language/pip.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/language/pip.py b/packaging/language/pip.py index abfb9385152..ec0bf93979c 100644 --- a/packaging/language/pip.py +++ b/packaging/language/pip.py @@ -71,7 +71,7 @@ options: required: false default: virtualenv virtualenv_python: - version_added: "FIXME" + version_added: "2.0" description: - The Python executable used for creating the virtual environment. For example C(python3.4), C(python2.7). 
When not specified, the From 030d6d645c61c9586b38c9b507bb2bb2a1b7efe4 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 20 Jul 2015 20:53:59 -0400 Subject: [PATCH 442/464] added version_Added for new signal option in docker module --- cloud/docker/docker.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 59814c30e01..96254a10654 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -195,6 +195,7 @@ options: disabled, fail unless the process exits cleanly. default: true signal: + version_added: "2.0" description: - With the state "killed", you can alter the signal sent to the container. From 88167a5daca39304bbb0a20cd62a2c26e72fe5f3 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 20 Jul 2015 21:18:49 -0400 Subject: [PATCH 443/464] minor doc fixes to docker_user --- cloud/docker/docker.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index abb4e764aae..e77951abf49 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -164,8 +164,7 @@ options: description: - Username or UID to use within the container required: false - default: - aliases: [] + default: null version_added: "2.0" username: description: From 55bc9e8fb1c8e1a50418d8471a72f049e74fd06d Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 20 Jul 2015 22:37:02 -0400 Subject: [PATCH 444/464] added rickmendes as maintainer --- cloud/amazon/ec2_elb_lb.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_elb_lb.py b/cloud/amazon/ec2_elb_lb.py index f2a04863923..3d54f994436 100644 --- a/cloud/amazon/ec2_elb_lb.py +++ b/cloud/amazon/ec2_elb_lb.py @@ -22,7 +22,9 @@ description: - Will be marked changed when called only if state is changed. short_description: Creates or destroys Amazon ELB. 
version_added: "1.5" -author: "Jim Dalton (@jsdalton)" +author: + - "Jim Dalton (@jsdalton)" + - "Rick Mendes (@rickmendes)" options: state: description: From 38f01b52e83f8a91c9e036a096d4413f0a39386d Mon Sep 17 00:00:00 2001 From: Johan Haals Date: Tue, 21 Jul 2015 14:24:07 +0200 Subject: [PATCH 445/464] Added support for --read-only Docker containers Adds support for mounting the container's root filesystem as read only. --- cloud/docker/docker.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index e77951abf49..6308bd94efe 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -265,6 +265,12 @@ options: default: DockerHub aliases: [] version_added: "1.8" + read_only: + description: + - Mount the container's root filesystem as read only + default: false + aliases: [] + version_added: "1.9" restart_policy: description: - Container restart policy. @@ -772,6 +778,7 @@ class DockerManager(object): 'privileged': self.module.params.get('privileged'), 'links': self.links, 'network_mode': self.module.params.get('net'), + 'read_only': self.module.params.get('read_only'), } optionals = {} @@ -1609,6 +1616,7 @@ def main(): insecure_registry = dict(default=False, type='bool'), log_driver = dict(default=None, choices=['json-file', 'none', 'syslog']), cpu_set = dict(default=None), + read_only = dict(default=False, type='bool'), ), required_together = ( ['tls_client_cert', 'tls_client_key'], From 51666c6defe04dcb976850be515f47a35a0305dd Mon Sep 17 00:00:00 2001 From: Yuhui Huang Date: Tue, 21 Jul 2015 16:07:25 -0700 Subject: [PATCH 446/464] Checking pip uninstall output in both stdout and stderr --- packaging/language/pip.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/packaging/language/pip.py b/packaging/language/pip.py index ec0bf93979c..8bbae35038d 100644 --- a/packaging/language/pip.py +++ b/packaging/language/pip.py @@ -155,7 +155,7 @@ def _get_cmd_options(module, cmd): words 
= stdout.strip().split() cmd_options = [ x for x in words if x.startswith('--') ] return cmd_options - + def _get_full_name(name, version=None): if version is None: @@ -356,7 +356,8 @@ def main(): rc, out_pip, err_pip = module.run_command(cmd, path_prefix=path_prefix, cwd=this_dir) out += out_pip err += err_pip - if rc == 1 and state == 'absent' and 'not installed' in out_pip: + if rc == 1 and state == 'absent' and \ + ('not installed' in out_pip or 'not installed' in err_pip): pass # rc is 1 when attempting to uninstall non-installed package elif rc != 0: _fail(module, cmd, out, err) From b96d304b93b8768f3427cc5495a66fb56e09453c Mon Sep 17 00:00:00 2001 From: Christian Hammerl Date: Sat, 18 Oct 2014 15:25:07 +0200 Subject: [PATCH 447/464] docker: add support to add/drop capabilities --- cloud/docker/docker.py | 29 +++++++++++++++++++++++++++-- 1 file changed, 27 insertions(+), 2 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index e77951abf49..6cf7ed1d51e 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -292,7 +292,19 @@ options: required: false default: null version_added: "2.0" - + cap_add: + description: + - Add capabilities for the container. Requires docker-py >= 0.5.0. + required: false + default: false + version_added: "2.0" + cap_drop: + description: + - Drop capabilities for the container. Requires docker-py >= 0.5.0. 
+ required: false + default: false + aliases: [] + version_added: "2.0" author: - "Cove Schneider (@cove)" - "Joshua Conner (@joshuaconner)" @@ -551,6 +563,8 @@ class DockerManager(object): 'log_driver': ((1, 2, 0), '1.18'), 'host_config': ((0, 7, 0), '1.15'), 'cpu_set': ((0, 6, 0), '1.14'), + 'cap_add': ((0, 5, 0), '1.14'), + 'cap_drop': ((0, 5, 0), '1.14'), # Clientside only 'insecure_registry': ((0, 5, 0), '0.0') } @@ -1321,7 +1335,8 @@ class DockerManager(object): optionals = {} for optional_param in ('dns', 'volumes_from', 'restart_policy', - 'restart_policy_retry', 'pid', 'extra_hosts', 'log_driver'): + 'restart_policy_retry', 'pid', 'extra_hosts', 'log_driver', + 'cap_add', 'cap_drop'): optionals[optional_param] = self.module.params.get(optional_param) if optionals['dns'] is not None: @@ -1356,6 +1371,14 @@ class DockerManager(object): log_config.type = optionals['log_driver'] params['log_config'] = log_config + if optionals['cap_add'] is not None: + self.ensure_capability('cap_add') + params['cap_add'] = optionals['cap_add'] + + if optionals['cap_drop'] is not None: + self.ensure_capability('cap_drop') + params['cap_drop'] = optionals['cap_drop'] + return docker.utils.create_host_config(**params) def create_containers(self, count=1): @@ -1609,6 +1632,8 @@ def main(): insecure_registry = dict(default=False, type='bool'), log_driver = dict(default=None, choices=['json-file', 'none', 'syslog']), cpu_set = dict(default=None), + cap_add = dict(default=None, type='list'), + cap_drop = dict(default=None, type='list'), ), required_together = ( ['tls_client_cert', 'tls_client_key'], From fd6518179b32c53103fe0c4b1dbbeb4486ed7532 Mon Sep 17 00:00:00 2001 From: Jeff Widman Date: Tue, 2 Jun 2015 14:07:30 -0700 Subject: [PATCH 448/464] Move validate command into doc fragment and fix a few typos --- files/copy.py | 11 ++--------- files/lineinfile.py | 11 +---------- files/replace.py | 7 +------ files/template.py | 10 +--------- 4 files changed, 5 insertions(+), 34 
deletions(-) diff --git a/files/copy.py b/files/copy.py index b7f333cead6..ad56800764b 100644 --- a/files/copy.py +++ b/files/copy.py @@ -63,21 +63,13 @@ options: force: description: - the default is C(yes), which will replace the remote file when contents - are different than the source. If C(no), the file will only be transferred + are different than the source. If C(no), the file will only be transferred if the destination does not exist. version_added: "1.1" required: false choices: [ "yes", "no" ] default: "yes" aliases: [ "thirsty" ] - validate: - description: - - The validation command to run before copying into place. The path to the file to - validate is passed in via '%s' which must be present as in the visudo example below. - The command is passed securely so shell features like expansion and pipes won't work. - required: false - default: "" - version_added: "1.2" directory_mode: description: - When doing a recursive copy set the mode for the directories. If this is not set we will use the system @@ -86,6 +78,7 @@ options: required: false version_added: "1.5" extends_documentation_fragment: files +extends_documentation_fragment: validate author: - "Ansible Core Team" - "Michael DeHaan" diff --git a/files/lineinfile.py b/files/lineinfile.py index 6bcfb3b3060..777f0a498a9 100644 --- a/files/lineinfile.py +++ b/files/lineinfile.py @@ -31,6 +31,7 @@ author: - "Daniel Hokka Zakrissoni (@dhozac)" - "Ahti Kitsik (@ahtik)" extends_documentation_fragment: files +extends_documentation_fragment: validate short_description: Ensure a particular line is in a file, or replace an existing line using a back-referenced regular expression. description: @@ -116,16 +117,6 @@ options: description: - Create a backup file including the timestamp information so you can get the original file back if you somehow clobbered it incorrectly. - validate: - required: false - description: - - validation to run before copying into place. 
- Use %s in the command to indicate the current file to validate. - The command is passed securely so shell features like - expansion and pipes won't work. - required: false - default: None - version_added: "1.4" others: description: - All arguments accepted by the M(file) module also work here. diff --git a/files/replace.py b/files/replace.py index fa0142823ea..dea2c32a54f 100644 --- a/files/replace.py +++ b/files/replace.py @@ -27,6 +27,7 @@ DOCUMENTATION = """ module: replace author: "Evan Kaufman (@EvanK)" extends_documentation_fragment: files +extends_documentation_fragment: validate short_description: Replace all instances of a particular string in a file using a back-referenced regular expression. description: @@ -61,12 +62,6 @@ options: description: - Create a backup file including the timestamp information so you can get the original file back if you somehow clobbered it incorrectly. - validate: - required: false - description: - - validation to run before copying into place - required: false - default: None others: description: - All arguments accepted by the M(file) module also work here. diff --git a/files/template.py b/files/template.py index a1dc72c27bd..120917f49c2 100644 --- a/files/template.py +++ b/files/template.py @@ -38,15 +38,6 @@ options: required: false choices: [ "yes", "no" ] default: "no" - validate: - description: - - The validation command to run before copying into place. - - The path to the file to validate is passed in via '%s' which must be present as in the visudo example below. - - validation to run before copying into place. The command is passed - securely so shell features like expansion and pipes won't work. 
- required: false - default: "" - version_added: "1.2" force: description: - the default is C(yes), which will replace the remote file when contents @@ -62,6 +53,7 @@ author: - Ansible Core Team - Michael DeHaan extends_documentation_fragment: files +extends_documentation_fragment: validate ''' EXAMPLES = ''' From 2a0f6c1cb33cf2cab5303ca1f26852011c7e2a80 Mon Sep 17 00:00:00 2001 From: Will Thames Date: Wed, 22 Jul 2015 12:05:21 +1000 Subject: [PATCH 449/464] Correct handling of empty role_attr_flags role_attr_flags is the empty string by default, not None. --- database/postgresql/postgresql_user.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/database/postgresql/postgresql_user.py b/database/postgresql/postgresql_user.py index d3f6d81c360..353d3ac3d93 100644 --- a/database/postgresql/postgresql_user.py +++ b/database/postgresql/postgresql_user.py @@ -92,7 +92,7 @@ options: description: - "PostgreSQL role attributes string in the format: CREATEDB,CREATEROLE,SUPERUSER" required: false - default: null + default: "" choices: [ "[NO]SUPERUSER","[NO]CREATEROLE", "[NO]CREATEUSER", "[NO]CREATEDB", "[NO]INHERIT", "[NO]LOGIN", "[NO]REPLICATION" ] state: @@ -233,7 +233,7 @@ def user_alter(cursor, module, user, password, role_attr_flags, encrypted, expir return False # Handle passwords. - if not no_password_changes and (password is not None or role_attr_flags is not None): + if not no_password_changes and (password is not None or role_attr_flags != ''): # Select password and all flag-like columns in order to verify changes. query_password_data = dict(password=password, expires=expires) select = "SELECT * FROM pg_authid where rolname=%(user)s" From 47cb92f74f17c31c72a535e3ae153c328f85be53 Mon Sep 17 00:00:00 2001 From: Will Thames Date: Wed, 22 Jul 2015 13:34:52 +1000 Subject: [PATCH 450/464] Ensure TEMP privilege gets removed when expanding ALL. 
ALL gets expanded to the list of VALID_PRIVS which includes TEMPORARY and TEMP The code that replaced TEMP with TEMPORARY didn't work with the expansion --- database/postgresql/postgresql_user.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/database/postgresql/postgresql_user.py b/database/postgresql/postgresql_user.py index 353d3ac3d93..cee5a9ae131 100644 --- a/database/postgresql/postgresql_user.py +++ b/database/postgresql/postgresql_user.py @@ -490,10 +490,10 @@ def parse_role_attrs(role_attr_flags): def normalize_privileges(privs, type_): new_privs = set(privs) - if 'ALL' in privs: + if 'ALL' in new_privs: new_privs.update(VALID_PRIVS[type_]) new_privs.remove('ALL') - if 'TEMP' in privs: + if 'TEMP' in new_privs: new_privs.add('TEMPORARY') new_privs.remove('TEMP') From dc9dfa9ef8e21eb7fc037c0f8c54d510a77e1beb Mon Sep 17 00:00:00 2001 From: Baraa Basata Date: Wed, 22 Jul 2015 00:03:33 -0400 Subject: [PATCH 451/464] Fix iam_policy example --- cloud/amazon/iam_policy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/iam_policy.py b/cloud/amazon/iam_policy.py index 5026169e104..9213d1585b0 100644 --- a/cloud/amazon/iam_policy.py +++ b/cloud/amazon/iam_policy.py @@ -120,7 +120,7 @@ tasks: iam_policy: iam_type: user iam_name: "{{ item.user }}" - policy_name: "s3_limited_access_{{ item.s3_user_prefix }}" + policy_name: "s3_limited_access_{{ item.prefix }}" state: present policy_json: " {{ lookup( 'template', 's3_policy.json.j2') }} " with_items: From 851e55b55fb916c35cbec7779da116281b279e4a Mon Sep 17 00:00:00 2001 From: Johan Haals Date: Wed, 22 Jul 2015 08:28:16 +0200 Subject: [PATCH 452/464] changed version_added for read_only param this feature will be released in 2.0 --- cloud/docker/docker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 1bd49b0f66d..919ff62ba1d 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ 
-270,7 +270,7 @@ options: - Mount the container's root filesystem as read only default: false aliases: [] - version_added: "1.9" + version_added: "2.0" restart_policy: description: - Container restart policy. From 313381981e14aa1d92ff41de362b28c127fd1fe6 Mon Sep 17 00:00:00 2001 From: Chris Church Date: Wed, 22 Jul 2015 03:14:20 -0400 Subject: [PATCH 453/464] Save user after creating before trying to read/set other properties. Fixes #1241 --- windows/win_user.ps1 | 1 + 1 file changed, 1 insertion(+) diff --git a/windows/win_user.ps1 b/windows/win_user.ps1 index ae4847a8528..b7be7e4eea3 100644 --- a/windows/win_user.ps1 +++ b/windows/win_user.ps1 @@ -146,6 +146,7 @@ If ($state -eq 'present') { If ($password -ne $null) { $user_obj.SetPassword($password) } + $user_obj.SetInfo() $result.changed = $true } ElseIf (($password -ne $null) -and ($update_password -eq 'always')) { From 2133cb11713b1f4f945b0b0fc032a0b17d752aa8 Mon Sep 17 00:00:00 2001 From: Artur Cygan Date: Wed, 22 Jul 2015 11:36:32 +0200 Subject: [PATCH 454/464] Update route53.py Fix typos --- cloud/amazon/route53.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/amazon/route53.py b/cloud/amazon/route53.py index c659843b9a3..aca01193555 100644 --- a/cloud/amazon/route53.py +++ b/cloud/amazon/route53.py @@ -213,7 +213,7 @@ EXAMPLES = ''' - route53: command: "create" zone: "foo.com" - hostes_zone_id: "Z2AABBCCDDEEFF" + hosted_zone_id: "Z2AABBCCDDEEFF" record: "localhost.foo.com" type: "AAAA" ttl: "7200" @@ -224,7 +224,7 @@ EXAMPLES = ''' - route53: command: "create" zone: "foo.com" - hostes_zone_id: "Z2AABBCCDDEEFF" + hosted_zone_id: "Z2AABBCCDDEEFF" record: "localhost.foo.com" type: "AAAA" ttl: "7200" From 222927256d14b8e530853f0a2c7cb878a3e991fd Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 22 Jul 2015 07:08:37 -0700 Subject: [PATCH 455/464] Remove validate_certs as the url is not user settable so we always want to validate the certificate --- 
cloud/amazon/_ec2_ami_search.py | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/cloud/amazon/_ec2_ami_search.py b/cloud/amazon/_ec2_ami_search.py index 4fac97e9471..ec9da6d4988 100644 --- a/cloud/amazon/_ec2_ami_search.py +++ b/cloud/amazon/_ec2_ami_search.py @@ -65,15 +65,6 @@ options: required: false default: paravirtual choices: ["paravirtual", "hvm"] - validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only - be set to C(no) used on personally controlled sites using self-signed - certificates. Prior to 1.9.3 the code defaulted to C(no). - required: false - default: 'yes' - choices: ['yes', 'no'] - version_added: '1.9.3' author: Lorin Hochstein ''' @@ -193,7 +184,6 @@ def main(): region=dict(required=False, default='us-east-1', choices=AWS_REGIONS), virt=dict(required=False, default='paravirtual', choices=['paravirtual', 'hvm']), - validate_certs = dict(required=False, default=True, type='bool'), ) module = AnsibleModule(argument_spec=arg_spec) distro = module.params['distro'] From e13e369aaed1973054e9f7ea15c16782db83a7c8 Mon Sep 17 00:00:00 2001 From: Guilherme Carlos Date: Wed, 22 Jul 2015 17:55:35 -0300 Subject: [PATCH 456/464] Fix login_user on mysql_user `login_username` was provided but `login_user` is the correct option --- database/mysql/mysql_user.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/database/mysql/mysql_user.py b/database/mysql/mysql_user.py index 0ff290f1185..36c400553ca 100644 --- a/database/mysql/mysql_user.py +++ b/database/mysql/mysql_user.py @@ -109,7 +109,7 @@ options: notes: - Requires the MySQLdb Python package on the remote host. For Ubuntu, this is as easy as apt-get install python-mysqldb. - - Both C(login_password) and C(login_username) are required when you are + - Both C(login_password) and C(login_user) are required when you are passing credentials. 
If none are present, the module will attempt to read the credentials from C(~/.my.cnf), and finally fall back to using the MySQL default login of 'root' with no password. From 1ea03e71825a652a8b83ab6ab933df9994e60c22 Mon Sep 17 00:00:00 2001 From: Johan Haals Date: Thu, 23 Jul 2015 16:45:01 +0200 Subject: [PATCH 457/464] ensures API compatibility for read_only containers --- cloud/docker/docker.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 919ff62ba1d..035766fac77 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -571,6 +571,7 @@ class DockerManager(object): 'cpu_set': ((0, 6, 0), '1.14'), 'cap_add': ((0, 5, 0), '1.14'), 'cap_drop': ((0, 5, 0), '1.14'), + 'read_only': ((1, 0, 0), '1.17'), # Clientside only 'insecure_registry': ((0, 5, 0), '0.0') } From 8ffe34dcf1ac663a00237048d62b2f04c029ded5 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Thu, 23 Jul 2015 14:23:00 -0400 Subject: [PATCH 458/464] Add new policy guidelines for Core --- CONTRIBUTING.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index e441a4e3527..ea9c4ced04e 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -22,6 +22,10 @@ I'd also read the community page above, but in particular, make sure you copy [t Also please make sure you are testing on the latest released version of Ansible or the development branch. +If you'd like to contribute code to an existing module +====================================================== +Each module in Core is maintained by the owner of that module; each module's owner is indicated in the documentation section of the module itself. Any pull request for a module that is given a +1 by the owner in the comments will be merged by the Ansible team. + Thanks! 
From 6a872469fc1bda54126dfa91338cf7a26bde701c Mon Sep 17 00:00:00 2001 From: fti7 Date: Fri, 17 Oct 2014 18:23:25 +0200 Subject: [PATCH 459/464] mount: Support for Check-Mode --- system/mount.py | 32 ++++++++++++++++++-------------- 1 file changed, 18 insertions(+), 14 deletions(-) diff --git a/system/mount.py b/system/mount.py index f052e36dd2d..ff7094dad3b 100644 --- a/system/mount.py +++ b/system/mount.py @@ -108,7 +108,7 @@ def _escape_fstab(v): """ escape space (040), ampersand (046) and backslash (134) which are invalid in fstab fields """ return v.replace('\\', '\\134').replace(' ', '\\040').replace('&', '\\046') -def set_mount(**kwargs): +def set_mount(module, **kwargs): """ set/change a mount point location in fstab """ # kwargs: name, src, fstype, opts, dump, passno, state, fstab=/etc/fstab @@ -167,14 +167,14 @@ def set_mount(**kwargs): to_write.append(new_line % args) changed = True - if changed: + if changed and not module.check_mode: write_fstab(to_write, args['fstab']) # mount function needs origname return (origname, changed) -def unset_mount(**kwargs): +def unset_mount(module, **kwargs): """ remove a mount point from fstab """ # kwargs: name, src, fstype, opts, dump, passno, state, fstab=/etc/fstab @@ -217,7 +217,7 @@ def unset_mount(**kwargs): # if we got here we found a match - continue and mark changed changed = True - if changed: + if changed and not module.check_mode: write_fstab(to_write, args['fstab']) # umount needs origname @@ -281,7 +281,8 @@ def main(): src = dict(required=True), fstype = dict(required=True), fstab = dict(default='/etc/fstab') - ) + ), + supports_check_mode=True ) @@ -316,8 +317,8 @@ def main(): state = module.params['state'] name = module.params['name'] if state == 'absent': - name, changed = unset_mount(**args) - if changed: + name, changed = unset_mount(module, **args) + if changed and not module.check_mode: if os.path.ismount(name): res,msg = umount(module, **args) if res: @@ -333,26 +334,27 @@ def main(): if state == 
'unmounted': if os.path.ismount(name): - res,msg = umount(module, **args) - if res: - module.fail_json(msg="Error unmounting %s: %s" % (name, msg)) + if not module.check_mode: + res,msg = umount(module, **args) + if res: + module.fail_json(msg="Error unmounting %s: %s" % (name, msg)) changed = True module.exit_json(changed=changed, **args) if state in ['mounted', 'present']: if state == 'mounted': - if not os.path.exists(name): + if not os.path.exists(name) and not module.check_mode: try: os.makedirs(name) except (OSError, IOError), e: module.fail_json(msg="Error making dir %s: %s" % (name, str(e))) - name, changed = set_mount(**args) + name, changed = set_mount(module, **args) if state == 'mounted': res = 0 if os.path.ismount(name): - if changed: + if changed and not module.check_mode: res,msg = mount(module, **args) elif 'bind' in args.get('opts', []): changed = True @@ -367,7 +369,9 @@ def main(): res,msg = mount(module, **args) else: changed = True - res,msg = mount(module, **args) + if not module.check_mode: + res,msg = mount(module, **args) + if res: module.fail_json(msg="Error mounting %s: %s" % (name, msg)) From 03a809a21c0fc683c8e78666e633dfbd85ee216f Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 23 Jul 2015 15:52:11 -0400 Subject: [PATCH 460/464] added version_added to body_format in uri --- network/basics/uri.py | 1 + 1 file changed, 1 insertion(+) diff --git a/network/basics/uri.py b/network/basics/uri.py index d7e5eee427c..3babba6d609 100644 --- a/network/basics/uri.py +++ b/network/basics/uri.py @@ -71,6 +71,7 @@ options: required: false choices: [ "raw", "json" ] default: raw + version_added: "2.0" method: description: - The HTTP method of the request or response. From 5f2b365faa456990dfa65c7d4ba3393168069f67 Mon Sep 17 00:00:00 2001 From: khassen Date: Thu, 13 Nov 2014 20:58:00 -0500 Subject: [PATCH 461/464] Use the common/shared MD5 function. 
--- cloud/amazon/s3.py | 13 ++----------- cloud/google/gc_storage.py | 4 ++-- 2 files changed, 4 insertions(+), 13 deletions(-) diff --git a/cloud/amazon/s3.py b/cloud/amazon/s3.py index 072c8bc40d4..e6b511b36b8 100644 --- a/cloud/amazon/s3.py +++ b/cloud/amazon/s3.py @@ -175,7 +175,6 @@ EXAMPLES = ''' import os import urlparse -import hashlib from ssl import SSLError try: @@ -356,13 +355,6 @@ def is_walrus(s3_url): else: return False -def get_md5_digest(local_file): - md5 = hashlib.md5() - with open(local_file, 'rb') as f: - for data in f.read(1024 ** 2): - md5.update(data) - return md5.hexdigest() - def main(): argument_spec = ec2_argument_spec() @@ -488,8 +480,7 @@ def main(): # Compare the remote MD5 sum of the object with the local dest md5sum, if it already exists. if pathrtn is True: md5_remote = keysum(module, s3, bucket, obj, version=version) - md5_local = get_md5_digest(dest) - + md5_local = module.md5(dest) if md5_local == md5_remote: sum_matches = True if overwrite == 'always': @@ -532,7 +523,7 @@ def main(): # Lets check key state. Does it exist and if it does, compute the etag md5sum. if bucketrtn is True and keyrtn is True: md5_remote = keysum(module, s3, bucket, obj) - md5_local = get_md5_digest(src) + md5_local = module.md5(src) if md5_local == md5_remote: sum_matches = True diff --git a/cloud/google/gc_storage.py b/cloud/google/gc_storage.py index 280bc42a219..c1e6f5707a6 100644 --- a/cloud/google/gc_storage.py +++ b/cloud/google/gc_storage.py @@ -284,7 +284,7 @@ def get_download_url(module, gs, bucket, obj, expiry): def handle_get(module, gs, bucket, obj, overwrite, dest): md5_remote = keysum(module, gs, bucket, obj) - md5_local = hashlib.md5(open(dest, 'rb').read()).hexdigest() + md5_local = module.md5(dest) if md5_local == md5_remote: module.exit_json(changed=False) if md5_local != md5_remote and not overwrite: @@ -300,7 +300,7 @@ def handle_put(module, gs, bucket, obj, overwrite, src, expiration): # Lets check key state. 
Does it exist and if it does, compute the etag md5sum. if bucket_rc and key_rc: md5_remote = keysum(module, gs, bucket, obj) - md5_local = hashlib.md5(open(src, 'rb').read()).hexdigest() + md5_local = module.md5(src) if md5_local == md5_remote: module.exit_json(msg="Local and remote object are identical", changed=False) if md5_local != md5_remote and not overwrite: From a3b9fb58bfc1935dd49bdb284cf62d6ed9662909 Mon Sep 17 00:00:00 2001 From: KIKUCHI Koichiro Date: Wed, 22 Jul 2015 19:10:05 +0900 Subject: [PATCH 462/464] Fix service enabled check failure on FreeBSD --- system/service.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/service.py b/system/service.py index 4336b7a7775..8caece20143 100644 --- a/system/service.py +++ b/system/service.py @@ -988,7 +988,7 @@ class FreeBsdService(Service): try: return self.service_enable_rcconf() - except: + except Exception: self.module.fail_json(msg='unable to set rcvar') def service_control(self): From 777d736baa0e1a3f0943b03a5dc2d1d219f96fe7 Mon Sep 17 00:00:00 2001 From: Dan Date: Tue, 5 May 2015 23:31:02 +0100 Subject: [PATCH 463/464] Added header support Added the ability to pass custom http headers to the fetch_url method. --- network/basics/get_url.py | 25 ++++++++++++++++++++++--- 1 file changed, 22 insertions(+), 3 deletions(-) diff --git a/network/basics/get_url.py b/network/basics/get_url.py index 66fc71b78da..d755808485e 100644 --- a/network/basics/get_url.py +++ b/network/basics/get_url.py @@ -98,6 +98,12 @@ options: required: false default: 10 version_added: '1.8' + headers: + description: + - Add custom HTTP headers to a request in the format 'key:value,key:value' + required: false + default: null + version_added: '1.9' url_username: description: - The username for use in HTTP basic authentication. 
This parameter can be used @@ -138,6 +144,9 @@ EXAMPLES=''' - name: download file and force basic auth get_url: url=http://example.com/path/file.conf dest=/etc/foo.conf force_basic_auth=yes + +- name: download file with custom HTTP headers + get_url: url=http://example.com/path/file.conf dest=/etc/foo.conf headers: 'key:value,key:value' ''' import urlparse @@ -157,14 +166,14 @@ def url_filename(url): return 'index.html' return fn -def url_get(module, url, dest, use_proxy, last_mod_time, force, timeout=10): +def url_get(module, url, dest, use_proxy, last_mod_time, force, timeout=10, headers=None): """ Download data from the url and store in a temporary file. Return (tempfile, info about the request) """ - rsp, info = fetch_url(module, url, use_proxy=use_proxy, force=force, last_mod_time=last_mod_time, timeout=timeout) + rsp, info = fetch_url(module, url, use_proxy=use_proxy, force=force, last_mod_time=last_mod_time, timeout=timeout, headers=headers) if info['status'] == 304: module.exit_json(url=url, dest=dest, changed=False, msg=info.get('msg', '')) @@ -214,6 +223,7 @@ def main(): dest = dict(required=True), sha256sum = dict(default=''), timeout = dict(required=False, type='int', default=10), + headers = dict(required=False, default=None), ) module = AnsibleModule( @@ -228,6 +238,15 @@ def main(): sha256sum = module.params['sha256sum'] use_proxy = module.params['use_proxy'] timeout = module.params['timeout'] + + # Parse headers to dict + if module.params['headers']: + try: + headers = dict(item.split(':') for item in module.params['headers'].split(',')) + except: + module.fail_json(msg="The header parameter requires a key:value,key:value syntax to be properly parsed.") + else: + headers = None dest_is_dir = os.path.isdir(dest) last_mod_time = None @@ -263,7 +282,7 @@ def main(): last_mod_time = datetime.datetime.utcfromtimestamp(mtime) # download to tmpsrc - tmpsrc, info = url_get(module, url, dest, use_proxy, last_mod_time, force, timeout) + tmpsrc, info = 
url_get(module, url, dest, use_proxy, last_mod_time, force, timeout, headers) # Now the request has completed, we can finally generate the final # destination file name from the info dict. From 76344db61e6de2a0a5c18bc2734f26704caba31f Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 24 Jul 2015 12:12:40 -0400 Subject: [PATCH 464/464] fixed version added --- network/basics/get_url.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/basics/get_url.py b/network/basics/get_url.py index d755808485e..01479260277 100644 --- a/network/basics/get_url.py +++ b/network/basics/get_url.py @@ -103,7 +103,7 @@ options: - Add custom HTTP headers to a request in the format 'key:value,key:value' required: false default: null - version_added: '1.9' + version_added: '2.0' url_username: description: - The username for use in HTTP basic authentication. This parameter can be used