From f7c498d0596276b6a4a3c84f9a943471cd733b68 Mon Sep 17 00:00:00 2001 From: Petros Moisiadis Date: Sun, 28 Sep 2014 13:24:47 +0300 Subject: [PATCH 001/236] synchronize: use a single -F instead of -FF This small change corrects behavior when one uses an .rsync-filter file to exclude some paths from both being transferred and being deleted, so that these excluded paths can be handled separately with different tasks (e.g. in order to deploy the excluded paths independently from the rest paths and notify handlers appropriately). The problem with the double -FF option is that it excludes the .rsync-filter file from being transferred to the receiver. However, deletions are done on the side of the receiver, so it is absolutely necessary the .rsync-filter file to be transferred to the receiver, so that the receiver knows what files to delete and what not to delete. --- files/synchronize.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/synchronize.py b/files/synchronize.py index 842dd863849..5935bc29ec1 100644 --- a/files/synchronize.py +++ b/files/synchronize.py @@ -252,7 +252,7 @@ def main(): group = module.params['group'] rsync_opts = module.params['rsync_opts'] - cmd = '%s --delay-updates -FF' % rsync + cmd = '%s --delay-updates -F' % rsync if compress: cmd = cmd + ' --compress' if rsync_timeout: From b3e84f2dd42afbc35cd0ed349320d5fb4f3ab2ec Mon Sep 17 00:00:00 2001 From: Phillip Holmes Date: Mon, 29 Sep 2014 16:20:25 -0500 Subject: [PATCH 002/236] Route53 fix - forcing zone_in, record_in to lower case It turns out the Route53 API cares if the zone and record specified in the playbook are lower case or not when deleting a record. If you use a variable to name your servers and care about case, using that same proper case name will cause Route53 DNS delete requests to fail. The change requested adds .lower() to the module.params.get for both zone and record when used in the underlying code. 
Both zone and record are mandatory variables, and as such a more complicated implementation is not needed, as they must always be specified when using this module see lines 169 and 170 for the required state). If you use lowercase names (or don't use a name variable and share it between a tag and DNS entries) then you will never see this issue. Tested/Confirmed as an issue in Ansible 1.6.6 and above. --- cloud/route53.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/route53.py b/cloud/route53.py index b3878e0580e..d0723a3d0e6 100644 --- a/cloud/route53.py +++ b/cloud/route53.py @@ -178,9 +178,9 @@ def main(): module = AnsibleModule(argument_spec=argument_spec) command_in = module.params.get('command') - zone_in = module.params.get('zone') + zone_in = module.params.get('zone').tolower ttl_in = module.params.get('ttl') - record_in = module.params.get('record') + record_in = module.params.get('record').tolower type_in = module.params.get('type') value_in = module.params.get('value') retry_interval_in = module.params.get('retry_interval') From 7402827950b873d35ff7d25174e57d71bff5c598 Mon Sep 17 00:00:00 2001 From: Phillip Holmes Date: Mon, 29 Sep 2014 16:23:41 -0500 Subject: [PATCH 003/236] Route53 fix - forcing zone_in, record_in to lower case Fixed the .tolower to .lower() for correct syntax (copied change from older notes). 
--- cloud/route53.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/route53.py b/cloud/route53.py index d0723a3d0e6..0d7fdcbade5 100644 --- a/cloud/route53.py +++ b/cloud/route53.py @@ -178,9 +178,9 @@ def main(): module = AnsibleModule(argument_spec=argument_spec) command_in = module.params.get('command') - zone_in = module.params.get('zone').tolower + zone_in = module.params.get('zone').lower() ttl_in = module.params.get('ttl') - record_in = module.params.get('record').tolower + record_in = module.params.get('record').lower() type_in = module.params.get('type') value_in = module.params.get('value') retry_interval_in = module.params.get('retry_interval') From 37d99031693cb9672d4337bd8c2f1935531ced94 Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Mon, 29 Sep 2014 18:07:41 -0400 Subject: [PATCH 004/236] Add hostname to generated user SSH key comment The default is not very useful to sort between different keys and user. Adding the hostname in the comment permit to later sort them if you start to reuse the key and set them in different servers. See https://github.com/ansible/ansible/pull/7420 for the rational. --- system/user.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/system/user.py b/system/user.py index 551384a7a67..95e48b4a21a 100644 --- a/system/user.py +++ b/system/user.py @@ -159,7 +159,7 @@ options: - Optionally specify the SSH key filename. ssh_key_comment: required: false - default: ansible-generated + default: ansible-generated on $HOSTNAME version_added: "0.9" description: - Optionally define the comment for the SSH key. 
@@ -198,6 +198,7 @@ import pwd import grp import syslog import platform +import socket try: import spwd @@ -1453,7 +1454,7 @@ def main(): 'bits': '2048', 'type': 'rsa', 'passphrase': None, - 'comment': 'ansible-generated' + 'comment': 'ansible-generated on %s' % socket.gethostname() } module = AnsibleModule( argument_spec = dict( From 82af0743820fa901423132cd2afa8ee1358315ce Mon Sep 17 00:00:00 2001 From: kustodian Date: Tue, 30 Sep 2014 00:33:55 +0200 Subject: [PATCH 005/236] Set selinux state to 'permissive' for state=disabled --- system/selinux.py | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/system/selinux.py b/system/selinux.py index 53e53d1d49c..908bbc250ec 100644 --- a/system/selinux.py +++ b/system/selinux.py @@ -174,14 +174,19 @@ def main(): if (state != runtime_state): if module.check_mode: module.exit_json(changed=True) - if (state == 'disabled'): - msgs.append('state change will take effect next reboot') - else: - if (runtime_enabled): + if (runtime_enabled): + if (state == 'disabled'): + if (runtime_state != 'permissive'): + # Temporarily set state to permissive + set_state('permissive') + msgs.append('runtime state temporarily changed from \'%s\' to \'permissive\', state change will take effect next reboot' % (runtime_state)) + else: + msgs.append('state change will take effect next reboot') + else: set_state(state) msgs.append('runtime state changed from \'%s\' to \'%s\'' % (runtime_state, state)) - else: - msgs.append('state change will take effect next reboot') + else: + msgs.append('state change will take effect next reboot') changed=True if (state != config_state): From ff36edbb1103274a787f3158bcb7b1d0555a2722 Mon Sep 17 00:00:00 2001 From: billwanjohi Date: Mon, 29 Sep 2014 22:42:28 +0000 Subject: [PATCH 006/236] user: add expired state ported from https://github.com/ansible/ansible/pull/6303 It's very useful and routine to disable a *nix user. 
I implemented expired instead of locked because this prevents any use of the account, safer than just preventing password-based authentication. I have tests [1], but since none of the suite came along with the core modules, I'm unsure how to submit them. [1] https://github.com/billwanjohi/ansible/blob/add_locked_state/test/integration/roles/test_user/tasks/main.yml --- system/user.py | 62 ++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 57 insertions(+), 5 deletions(-) diff --git a/system/user.py b/system/user.py index 551384a7a67..57f1693b646 100644 --- a/system/user.py +++ b/system/user.py @@ -84,10 +84,12 @@ options: state: required: false default: "present" - choices: [ present, absent ] + choices: [ present, absent, expired ] description: - - Whether the account should exist. When C(absent), removes - the user account. + - Whether the account should exist, and whether it is expired. + When C(absent), removes the user account. + When C(expired), the user will not be able to login through any means. + Expired state is only implemented for Linux. createhome: required: false default: "yes" @@ -318,6 +320,10 @@ class User(object): cmd.append('-s') cmd.append(self.shell) + if self.state == 'expired': + cmd.append('--expiredate') + cmd.append('1') + if self.password is not None: cmd.append('-p') cmd.append(self.password) @@ -424,6 +430,10 @@ class User(object): cmd.append('-s') cmd.append(self.shell) + if self.state == 'expired': + cmd.append('--expiredate') + cmd.append('1') + if self.update_password == 'always' and self.password is not None and info[1] != self.password: cmd.append('-p') cmd.append(self.password) @@ -692,6 +702,10 @@ class FreeBsdUser(User): cmd.append('-L') cmd.append(self.login_class) + if self.state == 'expired': + cmd.append('-e') + cmd.append('1970-01-01') + # system cannot be handled currently - should we error if its requested? 
# create the user (rc, out, err) = self.execute_command(cmd) @@ -776,6 +790,10 @@ class FreeBsdUser(User): new_groups = groups | set(current_groups) cmd.append(','.join(new_groups)) + if self.state == 'expired': + cmd.append('-e') + cmd.append('1970-01-01') + # modify the user if cmd will do anything if cmd_len != len(cmd): (rc, out, err) = self.execute_command(cmd) @@ -852,6 +870,10 @@ class OpenBSDUser(User): cmd.append('-L') cmd.append(self.login_class) + if self.state == 'expired': + cmd.append('-e') + cmd.append('1') + if self.password is not None: cmd.append('-p') cmd.append(self.password) @@ -946,6 +968,10 @@ class OpenBSDUser(User): cmd.append('-L') cmd.append(self.login_class) + if self.state == 'expired': + cmd.append('-e') + cmd.append('1') + if self.update_password == 'always' and self.password is not None and info[1] != self.password: cmd.append('-p') cmd.append(self.password) @@ -1019,6 +1045,10 @@ class NetBSDUser(User): cmd.append('-L') cmd.append(self.login_class) + if self.state == 'expired': + cmd.append('-e') + cmd.append('1') + if self.password is not None: cmd.append('-p') cmd.append(self.password) @@ -1101,6 +1131,10 @@ class NetBSDUser(User): cmd.append('-L') cmd.append(self.login_class) + if self.state == 'expired': + cmd.append('-e') + cmd.append('1') + if self.update_password == 'always' and self.password is not None and info[1] != self.password: cmd.append('-p') cmd.append(self.password) @@ -1178,6 +1212,10 @@ class SunOS(User): if self.createhome: cmd.append('-m') + if self.state == 'expired': + cmd.append('-e') + cmd.append('1/1/70') + cmd.append(self.name) if self.module.check_mode: @@ -1262,6 +1300,10 @@ class SunOS(User): cmd.append('-s') cmd.append(self.shell) + if self.state == 'expired': + cmd.append('-e') + cmd.append('1/1/70') + if self.module.check_mode: return (0, '', '') else: @@ -1351,6 +1393,10 @@ class AIX(User): if self.createhome: cmd.append('-m') + if self.state == 'expired': + cmd.append('-e') + 
cmd.append('0101000070') + cmd.append(self.name) (rc, out, err) = self.execute_command(cmd) @@ -1420,6 +1466,9 @@ class AIX(User): cmd.append('-s') cmd.append(self.shell) + if self.state == 'expired': + cmd.append('-e') + cmd.append('0101000070') # skip if no changes to be made if len(cmd) == 1: @@ -1457,7 +1506,7 @@ def main(): } module = AnsibleModule( argument_spec = dict( - state=dict(default='present', choices=['present', 'absent'], type='str'), + state=dict(default='present', choices=['present', 'absent', 'expired'], type='str'), name=dict(required=True, aliases=['user'], type='str'), uid=dict(default=None, type='str'), non_unique=dict(default='no', type='bool'), @@ -1512,7 +1561,10 @@ def main(): module.fail_json(name=user.name, msg=err, rc=rc) result['force'] = user.force result['remove'] = user.remove - elif user.state == 'present': + elif user.state == 'expired' and user.platform != 'Generic': + module.fail_json(name=user.state, + msg='expired state not yet support for {0} platform'.format(user.platform)) + elif user.state == 'present' or user.state == 'expired': if not user.user_exists(): if module.check_mode: module.exit_json(changed=True) From 204a0dc1313f7b64ce8e279cfd5f31f7b22a3658 Mon Sep 17 00:00:00 2001 From: Nate Coraor Date: Wed, 1 Oct 2014 00:31:33 -0400 Subject: [PATCH 007/236] Fix a few bugs and misbehavior in the hg module: 1. Don't pull when `dest` is already at the desired changeset. 2. Don't change the working copy when cleaning or pulling and a revision was specified. 3. Change the default for the `revision` param to match the behavior of hg. 
--- source_control/hg.py | 40 +++++++++++++++++++++++++++++++++------- 1 file changed, 33 insertions(+), 7 deletions(-) diff --git a/source_control/hg.py b/source_control/hg.py index 1b95bcd5ac3..c2bd0d9d953 100644 --- a/source_control/hg.py +++ b/source_control/hg.py @@ -2,6 +2,7 @@ #-*- coding: utf-8 -*- # (c) 2013, Yeukhon Wong +# (c) 2014, Nate Coraor # # This module was originally inspired by Brad Olson's ansible-module-mercurial # . This module tends @@ -49,7 +50,7 @@ options: - Equivalent C(-r) option in hg command which could be the changeset, revision number, branch name or even tag. required: false - default: "default" + default: null aliases: [ version ] force: description: @@ -128,7 +129,10 @@ class Hg(object): if not before: return False - (rc, out, err) = self._command(['update', '-C', '-R', self.dest]) + args = ['update', '-C', '-R', self.dest] + if self.revision is not None: + args = args + ['-r', self.revision] + (rc, out, err) = self._command(args) if rc != 0: self.module.fail_json(msg=err) @@ -170,13 +174,30 @@ class Hg(object): ['pull', '-R', self.dest, self.repo]) def update(self): + if self.revision is not None: + return self._command(['update', '-r', self.revision, '-R', self.dest]) return self._command(['update', '-R', self.dest]) def clone(self): - return self._command(['clone', self.repo, self.dest, '-r', self.revision]) + if self.revision is not None: + return self._command(['clone', self.repo, self.dest, '-r', self.revision]) + return self._command(['clone', self.repo, self.dest]) - def switch_version(self): - return self._command(['update', '-r', self.revision, '-R', self.dest]) + @property + def at_revision(self): + """ + There is no point in pulling from a potentially down/slow remote site + if the desired changeset is already the current changeset. 
+ """ + if self.revision is None or len(self.revision) < 7: + # Assume it's a rev number, tag, or branch + return False + (rc, out, err) = self._command(['--debug', 'id', '-i', '-R', self.dest]) + if rc != 0: + self.module.fail_json(msg=err) + if out.startswith(self.revision): + return True + return False # =========================================== @@ -185,7 +206,7 @@ def main(): argument_spec = dict( repo = dict(required=True, aliases=['name']), dest = dict(required=True), - revision = dict(default="default", aliases=['version']), + revision = dict(default=None, aliases=['version']), force = dict(default='yes', type='bool'), purge = dict(default='no', type='bool'), executable = dict(default=None), @@ -212,6 +233,12 @@ def main(): (rc, out, err) = hg.clone() if rc != 0: module.fail_json(msg=err) + elif hg.at_revision: + # no update needed, don't pull + before = hg.get_revision() + + # but force and purge if desired + cleaned = hg.cleanup(force, purge) else: # get the current state before doing pulling before = hg.get_revision() @@ -227,7 +254,6 @@ def main(): if rc != 0: module.fail_json(msg=err) - hg.switch_version() after = hg.get_revision() if before != after or cleaned: changed = True From 6157a6552fa236c92a938e70f975c62cc87ac780 Mon Sep 17 00:00:00 2001 From: Brendan Jurd Date: Thu, 2 Oct 2014 16:32:30 +1000 Subject: [PATCH 008/236] Add word boundary in apache2_module regexp Add a word boundary \b to the regexp for checking the output of a2{en,dis}mod, to avoid a false positive for a module that ends with the same text as the module we're working on. For example, the previous regexp r'.*spam already enabled' would also match against 'eggs_spam already enabled'. Also, get rid of the redundant '.*' from the end of the regexp. 
--- web_infrastructure/apache2_module.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/web_infrastructure/apache2_module.py b/web_infrastructure/apache2_module.py index 39351482087..13b9e82136f 100644 --- a/web_infrastructure/apache2_module.py +++ b/web_infrastructure/apache2_module.py @@ -51,7 +51,7 @@ def _disable_module(module): a2dismod_binary = module.get_bin_path("a2dismod") result, stdout, stderr = module.run_command("%s %s" % (a2dismod_binary, name)) - if re.match(r'.*' + name + r' already disabled.*', stdout, re.S): + if re.match(r'.*\b' + name + r' already disabled', stdout, re.S): module.exit_json(changed = False, result = "Success") elif result != 0: module.fail_json(msg="Failed to disable module %s: %s" % (name, stdout)) @@ -63,7 +63,7 @@ def _enable_module(module): a2enmod_binary = module.get_bin_path("a2enmod") result, stdout, stderr = module.run_command("%s %s" % (a2enmod_binary, name)) - if re.match(r'.*' + name + r' already enabled.*', stdout, re.S): + if re.match(r'.*\b' + name + r' already enabled', stdout, re.S): module.exit_json(changed = False, result = "Success") elif result != 0: module.fail_json(msg="Failed to enable module %s: %s" % (name, stdout)) From 1b973907658374d146f7a4126a6b9979171db326 Mon Sep 17 00:00:00 2001 From: Jonathan Armani Date: Wed, 27 Aug 2014 22:26:47 +0200 Subject: [PATCH 009/236] Add enable / disable of services for OpenBSD if rcctl is present --- system/service.py | 28 ++++++++++++++++++++++++---- 1 file changed, 24 insertions(+), 4 deletions(-) diff --git a/system/service.py b/system/service.py index b235ee25c57..d708a848bd3 100644 --- a/system/service.py +++ b/system/service.py @@ -945,9 +945,7 @@ class FreeBsdService(Service): class OpenBsdService(Service): """ This is the OpenBSD Service manipulation class - it uses /etc/rc.d for - service control. 
Enabling a service is currently not supported because the - _flags variable is not boolean, you should supply a rc.conf.local - file in some other way. + service control. Enabling a service is currently supported if rcctl is present """ platform = 'OpenBSD' @@ -963,6 +961,8 @@ class OpenBsdService(Service): if not self.svc_cmd: self.module.fail_json(msg='unable to find rc.d script') + self.enable_cmd = self.module.get_bin_path('rcctl') + def get_service_status(self): rc, stdout, stderr = self.execute_command("%s %s" % (self.svc_cmd, 'check')) if rc == 1: @@ -971,7 +971,27 @@ class OpenBsdService(Service): self.running = True def service_control(self): - return self.execute_command("%s %s" % (self.svc_cmd, self.action)) + return self.execute_command("%s -f %s" % (self.svc_cmd, self.action)) + + def service_enable(self): + if not self.enable_cmd: + return super(OpenBsdService, self).service_enable() + + rc, stdout, stderr = self.execute_command("%s %s %s" % (self.enable_cmd, 'status', self.name)) + + if self.enable: + action = "enable %s flags %s" % (self.name, self.arguments) + args = self.arguments + if rc == 0: + return + else: + action = "disable %s" % self.name + if rc == 1: + return + + # XXX check rc ? + rc, stdout, stderr = self.execute_command("%s %s" % (self.enable_cmd, action)) + self.changed = True # =========================================== # Subclass: NetBSD From c46e030100dbde9f8a8d71263a3a9a11b45880df Mon Sep 17 00:00:00 2001 From: Patrik Lundin Date: Thu, 28 Aug 2014 14:44:43 +0200 Subject: [PATCH 010/236] Make "enabled" code aware of --check mode. --- system/service.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/system/service.py b/system/service.py index d708a848bd3..e17eb610872 100644 --- a/system/service.py +++ b/system/service.py @@ -989,6 +989,9 @@ class OpenBsdService(Service): if rc == 1: return + if self.module.check_mode: + self.module.exit_json(changed=True, msg="changing service enablement") + # XXX check rc ? 
rc, stdout, stderr = self.execute_command("%s %s" % (self.enable_cmd, action)) self.changed = True From 1a8cdb5e3e68b710cec1c0b2d7d94f48d7f1586b Mon Sep 17 00:00:00 2001 From: Patrik Lundin Date: Thu, 28 Aug 2014 16:33:39 +0200 Subject: [PATCH 011/236] Check rc and print error message if any. It is probably good to use stdout before printing a generic error message as well. --- system/service.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/system/service.py b/system/service.py index e17eb610872..238612ea30a 100644 --- a/system/service.py +++ b/system/service.py @@ -992,8 +992,16 @@ class OpenBsdService(Service): if self.module.check_mode: self.module.exit_json(changed=True, msg="changing service enablement") - # XXX check rc ? rc, stdout, stderr = self.execute_command("%s %s" % (self.enable_cmd, action)) + + if rc != 0: + if stderr: + self.module.fail_json(msg=stderr) + elif stdout: + self.module.fail_json(msg=stdout) + else: + self.module.fail_json(msg="rcctl failed to modify service enablement") + self.changed = True # =========================================== From c6dd88c1d162e1a16749577e5d4df9693583389e Mon Sep 17 00:00:00 2001 From: Patrik Lundin Date: Thu, 28 Aug 2014 16:50:37 +0200 Subject: [PATCH 012/236] Fail if "rcctl status" writes to stderr. --- system/service.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/system/service.py b/system/service.py index 238612ea30a..75450aa4c13 100644 --- a/system/service.py +++ b/system/service.py @@ -987,6 +987,8 @@ class OpenBsdService(Service): else: action = "disable %s" % self.name if rc == 1: + if stderr: + self.module.fail_json(msg=stderr) return if self.module.check_mode: From 5f37624eb4debca831a3b44a39b2b3b96f1872aa Mon Sep 17 00:00:00 2001 From: Patrik Lundin Date: Sun, 31 Aug 2014 11:58:37 +0200 Subject: [PATCH 013/236] Tweak error checking for "enabled" code. Based on input from @jarmani: * A return value of 2 now means a service does not exist. 
Instead of trying to handle the different meanings of rc after running "status", just look at stderr to know if something failed. * Skip looking at stdout to make the code cleaner. Any errors should turn up on stderr. --- system/service.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/system/service.py b/system/service.py index 75450aa4c13..b5c6da9dee8 100644 --- a/system/service.py +++ b/system/service.py @@ -979,6 +979,9 @@ class OpenBsdService(Service): rc, stdout, stderr = self.execute_command("%s %s %s" % (self.enable_cmd, 'status', self.name)) + if stderr: + self.module.fail_json(msg=stderr) + if self.enable: action = "enable %s flags %s" % (self.name, self.arguments) args = self.arguments @@ -987,8 +990,6 @@ class OpenBsdService(Service): else: action = "disable %s" % self.name if rc == 1: - if stderr: - self.module.fail_json(msg=stderr) return if self.module.check_mode: @@ -999,8 +1000,6 @@ class OpenBsdService(Service): if rc != 0: if stderr: self.module.fail_json(msg=stderr) - elif stdout: - self.module.fail_json(msg=stdout) else: self.module.fail_json(msg="rcctl failed to modify service enablement") From 924cf20cf8ad955af8a187cbdbe508852eac0d3c Mon Sep 17 00:00:00 2001 From: Patrik Lundin Date: Sat, 6 Sep 2014 19:48:14 +0200 Subject: [PATCH 014/236] Depend more on rcctl if it is present. * Make the module support enable/disable of special services like pf via rcctl. Idea and method from @jarmani. * Make the module handle when the user supplied 'arguments' variable does not match the current flags in rc.conf.local. * Update description now that the code tries to use rcctl for everything if it is available. 
--- system/service.py | 45 +++++++++++++++++++++++++++++++-------------- 1 file changed, 31 insertions(+), 14 deletions(-) diff --git a/system/service.py b/system/service.py index b5c6da9dee8..ac266087e85 100644 --- a/system/service.py +++ b/system/service.py @@ -944,34 +944,48 @@ class FreeBsdService(Service): class OpenBsdService(Service): """ - This is the OpenBSD Service manipulation class - it uses /etc/rc.d for - service control. Enabling a service is currently supported if rcctl is present + This is the OpenBSD Service manipulation class - it uses rcctl(8) or + /etc/rc.d scripts for service control. Enabling a service is + only supported if rcctl is present. """ platform = 'OpenBSD' distribution = None def get_service_tools(self): - rcdir = '/etc/rc.d' + self.enable_cmd = self.module.get_bin_path('rcctl') - rc_script = "%s/%s" % (rcdir, self.name) - if os.path.isfile(rc_script): - self.svc_cmd = rc_script + if self.enable_cmd: + self.svc_cmd = self.enable_cmd + else: + rcdir = '/etc/rc.d' - if not self.svc_cmd: - self.module.fail_json(msg='unable to find rc.d script') + rc_script = "%s/%s" % (rcdir, self.name) + if os.path.isfile(rc_script): + self.svc_cmd = rc_script - self.enable_cmd = self.module.get_bin_path('rcctl') + if not self.svc_cmd: + self.module.fail_json(msg='unable to find svc_cmd') def get_service_status(self): - rc, stdout, stderr = self.execute_command("%s %s" % (self.svc_cmd, 'check')) + if self.enable_cmd: + rc, stdout, stderr = self.execute_command("%s %s %s" % (self.svc_cmd, 'check', self.name)) + else: + rc, stdout, stderr = self.execute_command("%s %s" % (self.svc_cmd, 'check')) + + if stderr: + self.module.fail_json(msg=stderr) + if rc == 1: self.running = False elif rc == 0: self.running = True def service_control(self): - return self.execute_command("%s -f %s" % (self.svc_cmd, self.action)) + if self.enable_cmd: + return self.execute_command("%s -f %s %s" % (self.svc_cmd, self.action, self.name)) + else: + return 
self.execute_command("%s -f %s" % (self.svc_cmd, self.action)) def service_enable(self): if not self.enable_cmd: @@ -982,10 +996,13 @@ class OpenBsdService(Service): if stderr: self.module.fail_json(msg=stderr) + current_flags = stdout.rstrip() + if self.enable: - action = "enable %s flags %s" % (self.name, self.arguments) - args = self.arguments - if rc == 0: + action = "enable %s" % (self.name) + if self.arguments or self.arguments != current_flags: + action = action + " flags %s" % (self.arguments) + if rc == 0 and self.arguments == current_flags: return else: action = "disable %s" % self.name From e463400412d30b0c26c474af625f799241a7c4cf Mon Sep 17 00:00:00 2001 From: Patrik Lundin Date: Tue, 9 Sep 2014 16:12:47 +0200 Subject: [PATCH 015/236] Simplify self.arguments logic. Strange logic pointed out by @jarmani, thanks! --- system/service.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/service.py b/system/service.py index ac266087e85..484f007cac9 100644 --- a/system/service.py +++ b/system/service.py @@ -1000,7 +1000,7 @@ class OpenBsdService(Service): if self.enable: action = "enable %s" % (self.name) - if self.arguments or self.arguments != current_flags: + if self.arguments or current_flags: action = action + " flags %s" % (self.arguments) if rc == 0 and self.arguments == current_flags: return From 8d9f6053d3c4f8c15bf2d1fdff79efe3f6637255 Mon Sep 17 00:00:00 2001 From: Ricky Cook Date: Wed, 8 Oct 2014 22:19:26 +1100 Subject: [PATCH 016/236] Simplify command module option parsing --- commands/command.py | 33 +++++++++++++++++++++------------ 1 file changed, 21 insertions(+), 12 deletions(-) diff --git a/commands/command.py b/commands/command.py index c584d6feed8..90a94fd8369 100644 --- a/commands/command.py +++ b/commands/command.py @@ -18,6 +18,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
+import copy import sys import datetime import traceback @@ -99,12 +100,21 @@ EXAMPLES = ''' creates: /path/to/database ''' +OPTIONS = {'chdir': None, + 'creates': None, + 'executable': None, + 'NO_LOG': None, + 'removes': None, + 'warn': True, + } + # This is a pretty complex regex, which functions as follows: # # 1. (^|\s) # ^ look for a space or the beginning of the line -# 2. (creates|removes|chdir|executable|NO_LOG)= -# ^ look for a valid param, followed by an '=' +# 2. ({options_list})= +# ^ expanded to (chdir|creates|executable...)= +# look for a valid param, followed by an '=' # 3. (?P[\'"])? # ^ look for an optional quote character, which can either be # a single or double quote character, and store it for later @@ -114,8 +124,12 @@ EXAMPLES = ''' # ^ a non-escaped space or a non-escaped quote of the same kind # that was matched in the first 'quote' is found, or the end of # the line is reached - -PARAM_REGEX = re.compile(r'(^|\s)(creates|removes|chdir|executable|NO_LOG|warn)=(?P[\'"])?(.*?)(?(quote)(?[\'"])?(.*?)(?(quote)(? 
Date: Wed, 8 Oct 2014 22:25:02 +1100 Subject: [PATCH 017/236] Add comment to command options dict --- commands/command.py | 1 + 1 file changed, 1 insertion(+) diff --git a/commands/command.py b/commands/command.py index 90a94fd8369..75927a5ba0b 100644 --- a/commands/command.py +++ b/commands/command.py @@ -100,6 +100,7 @@ EXAMPLES = ''' creates: /path/to/database ''' +# Dict of options and their defaults OPTIONS = {'chdir': None, 'creates': None, 'executable': None, From b195b5a6bb65acfbfddc61885df1fe9d721c34a3 Mon Sep 17 00:00:00 2001 From: Ricky Cook Date: Wed, 8 Oct 2014 22:30:20 +1100 Subject: [PATCH 018/236] Get warn option same as other args --- commands/command.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/command.py b/commands/command.py index 75927a5ba0b..44c95a3d5bd 100644 --- a/commands/command.py +++ b/commands/command.py @@ -163,7 +163,7 @@ def main(): args = module.params['args'] creates = module.params['creates'] removes = module.params['removes'] - warn = module.params.get('warn', True) + warn = module.params['warn'] if args.strip() == '': module.fail_json(rc=256, msg="no command given") From 6db328c79a8c1f406fdab4e901732ecc9682ced3 Mon Sep 17 00:00:00 2001 From: Ricky Cook Date: Wed, 8 Oct 2014 22:59:03 +1100 Subject: [PATCH 019/236] Fix regex string format --- commands/command.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/command.py b/commands/command.py index 44c95a3d5bd..2b79b327d71 100644 --- a/commands/command.py +++ b/commands/command.py @@ -127,7 +127,7 @@ OPTIONS = {'chdir': None, # the line is reached OPTIONS_REGEX = '|'.join(OPTIONS.keys()) PARAM_REGEX = re.compile( - r'(^|\s)({options_list})=(?P[\'"])?(.*?)(?(quote)(?[\'"])?(.*?)(?(quote)(? Date: Sun, 12 Oct 2014 18:32:41 +0200 Subject: [PATCH 020/236] Multiple fixes for OpenBSD rcctl handling. * Use the newly added 'default' argument to know if the default flags are set or not. 
* Handle that 'status' may either return flags or YES/NO. * Centralize flag handling logic. * Set action variable after check if we need to keep going. Big thanks to @ajacoutot for implementing the rcctl 'default' argument. --- system/service.py | 40 ++++++++++++++++++++++++++++++++++------ 1 file changed, 34 insertions(+), 6 deletions(-) diff --git a/system/service.py b/system/service.py index 484f007cac9..e8708682eb8 100644 --- a/system/service.py +++ b/system/service.py @@ -991,24 +991,52 @@ class OpenBsdService(Service): if not self.enable_cmd: return super(OpenBsdService, self).service_enable() + rc, stdout, stderr = self.execute_command("%s %s %s" % (self.enable_cmd, 'default', self.name)) + + if stderr: + self.module.fail_json(msg=stderr) + + default_flags = stdout.rstrip() + rc, stdout, stderr = self.execute_command("%s %s %s" % (self.enable_cmd, 'status', self.name)) if stderr: self.module.fail_json(msg=stderr) - current_flags = stdout.rstrip() + status_string = stdout.rstrip() + + # Depending on the service the string returned from 'status' may be + # either a set of flags or the boolean YES/NO + if status_string == "YES" or status_string == "N0": + current_flags = '' + else: + current_flags = status_string + + # If there are arguments from the user we use these as flags unless + # they are already set. + if self.arguments and self.arguments != current_flags: + changed_flags = self.arguments + # If the user has not supplied any arguments and the current flags + # differ from the default we reset them. + elif not self.arguments and current_flags != default_flags: + changed_flags = ' ' + # Otherwise there is no need to modify flags. 
+ else: + changed_flags = '' if self.enable: - action = "enable %s" % (self.name) - if self.arguments or current_flags: - action = action + " flags %s" % (self.arguments) - if rc == 0 and self.arguments == current_flags: + if rc == 0 and not changed_flags: return + + action = "enable %s" % (self.name) + if changed_flags: + action = action + " flags %s" % (changed_flags) else: - action = "disable %s" % self.name if rc == 1: return + action = "disable %s" % self.name + if self.module.check_mode: self.module.exit_json(changed=True, msg="changing service enablement") From 4f2b99c1e079b266256beb854e36b22310b82ad9 Mon Sep 17 00:00:00 2001 From: anatoly techtonik Date: Mon, 10 Nov 2014 20:32:05 +0300 Subject: [PATCH 021/236] acl: Fix X support in ACL permissions If you try to set rwX permissions, ACL fails to set them at all. Expected: $ sudo setfacl -m 'group::rwX' www ... drwxrwxr-x 2 root root 4096 Nov 10 17:09 www With Ansible: acl: name=/var/www permissions=rwX etype=group state=present ... drwxrw-r-x 2 root root 4096 Nov 10 17:30 www x for group is erased. 
=/ --- files/acl.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/files/acl.py b/files/acl.py index 30c533e006c..9790f8c927f 100644 --- a/files/acl.py +++ b/files/acl.py @@ -111,6 +111,9 @@ def normalize_permissions(p): perms[1] = 'w' if char == 'x': perms[2] = 'x' + if char == 'X': + if perms[2] != 'x': # 'x' is more permissive + perms[2] = 'X' return ''.join(perms) def split_entry(entry): From ae1af202e83da9ecf312acf6e586583c8bb13f47 Mon Sep 17 00:00:00 2001 From: Jonathan Mainguy Date: Mon, 29 Sep 2014 19:33:40 -0400 Subject: [PATCH 022/236] Fixes --enable-repo for Oracle Linux --- packaging/os/yum.py | 52 ++++++++++++++++++--------------------------- 1 file changed, 21 insertions(+), 31 deletions(-) diff --git a/packaging/os/yum.py b/packaging/os/yum.py index c3158077d18..6dc1e532069 100644 --- a/packaging/os/yum.py +++ b/packaging/os/yum.py @@ -237,13 +237,11 @@ def is_available(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=[], dis_ else: myrepoq = list(repoq) - for repoid in dis_repos: - r_cmd = ['--disablerepo', repoid] - myrepoq.extend(r_cmd) + r_cmd = ['--disablerepo', ','.join(dis_repos)] + myrepoq.extend(r_cmd) - for repoid in en_repos: - r_cmd = ['--enablerepo', repoid] - myrepoq.extend(r_cmd) + r_cmd = ['--enablerepo', ','.join(en_repos)] + myrepoq.extend(r_cmd) cmd = myrepoq + ["--qf", qf, pkgspec] rc,out,err = module.run_command(cmd) @@ -286,13 +284,11 @@ def is_update(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=[], dis_rep else: myrepoq = list(repoq) - for repoid in dis_repos: - r_cmd = ['--disablerepo', repoid] - myrepoq.extend(r_cmd) + r_cmd = ['--disablerepo', ','.join(dis_repos)] + myrepoq.extend(r_cmd) - for repoid in en_repos: - r_cmd = ['--enablerepo', repoid] - myrepoq.extend(r_cmd) + r_cmd = ['--enablerepo', ','.join(en_repos)] + myrepoq.extend(r_cmd) cmd = myrepoq + ["--pkgnarrow=updates", "--qf", qf, pkgspec] rc,out,err = module.run_command(cmd) @@ -331,13 +327,11 @@ def what_provides(module, repoq, req_spec, 
conf_file, qf=def_qf, en_repos=[], d else: myrepoq = list(repoq) - for repoid in dis_repos: - r_cmd = ['--disablerepo', repoid] - myrepoq.extend(r_cmd) + r_cmd = ['--disablerepo', ','.join(dis_repos)] + myrepoq.extend(r_cmd) - for repoid in en_repos: - r_cmd = ['--enablerepo', repoid] - myrepoq.extend(r_cmd) + r_cmd = ['--enablerepo', ','.join(en_repos)] + myrepoq.extend(r_cmd) cmd = myrepoq + ["--qf", qf, "--whatprovides", req_spec] rc,out,err = module.run_command(cmd) @@ -672,7 +666,7 @@ def latest(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): nothing_to_do = False break - if basecmd == 'update' and is_update(module, repoq, this, conf_file, en_repos=en_repos, dis_repos=dis_repos): + if basecmd == 'update' and is_update(module, repoq, this, conf_file, en_repos=en_repos, dis_repos=en_repos): nothing_to_do = False break @@ -734,27 +728,24 @@ def ensure(module, state, pkgspec, conf_file, enablerepo, disablerepo, en_repos = [] if disablerepo: dis_repos = disablerepo.split(',') + r_cmd = ['--disablerepo=%s' % disablerepo] + yum_basecmd.extend(r_cmd) if enablerepo: en_repos = enablerepo.split(',') - - for repoid in dis_repos: - r_cmd = ['--disablerepo=%s' % repoid] + r_cmd = ['--enablerepo=%s' % enablerepo] yum_basecmd.extend(r_cmd) + - for repoid in en_repos: - r_cmd = ['--enablerepo=%s' % repoid] - yum_basecmd.extend(r_cmd) if state in ['installed', 'present', 'latest']: my = yum_base(conf_file) try: - for r in dis_repos: - my.repos.disableRepo(r) - + if disablerepo: + my.repos.disableRepo(disablerepo) current_repos = my.repos.repos.keys() - for r in en_repos: + if enablerepo: try: - my.repos.enableRepo(r) + my.repos.enableRepo(enablerepo) new_repos = my.repos.repos.keys() for i in new_repos: if not i in current_repos: @@ -765,7 +756,6 @@ def ensure(module, state, pkgspec, conf_file, enablerepo, disablerepo, module.fail_json(msg="Error setting/accessing repo %s: %s" % (r, e)) except yum.Errors.YumBaseError, e: module.fail_json(msg="Error 
accessing repos: %s" % e) - if state in ['installed', 'present']: if disable_gpg_check: yum_basecmd.append('--nogpgcheck') From 16b251d743742eb99c73a97656f1c998ef2c1c29 Mon Sep 17 00:00:00 2001 From: Jonathan Mainguy Date: Mon, 29 Sep 2014 19:11:45 -0400 Subject: [PATCH 023/236] adds error message if socket does not exist --- database/mysql/mysql_db.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/database/mysql/mysql_db.py b/database/mysql/mysql_db.py index 38dee608ba5..31c0e309eac 100644 --- a/database/mysql/mysql_db.py +++ b/database/mysql/mysql_db.py @@ -102,6 +102,7 @@ EXAMPLES = ''' import ConfigParser import os import pipes +import stat try: import MySQLdb except ImportError: @@ -281,6 +282,7 @@ def main(): collation = module.params["collation"] state = module.params["state"] target = module.params["target"] + socket = module.params["login_unix_socket"] # make sure the target path is expanded for ~ and $HOME if target is not None: @@ -310,8 +312,14 @@ def main(): else: connect_to_db = 'mysql' try: - if module.params["login_unix_socket"]: - db_connection = MySQLdb.connect(host=module.params["login_host"], unix_socket=module.params["login_unix_socket"], user=login_user, passwd=login_password, db=connect_to_db) + if socket: + try: + socketmode = os.stat(socket).st_mode + if not stat.S_ISSOCK(socketmode): + module.fail_json(msg="%s, is not a socket, unable to connect" % socket) + except OSError: + module.fail_json(msg="%s, does not exist, unable to connect" % socket) + db_connection = MySQLdb.connect(host=module.params["login_host"], unix_socket=socket, user=login_user, passwd=login_password, db=connect_to_db) elif module.params["login_port"] != "3306" and module.params["login_host"] == "localhost": module.fail_json(msg="login_host is required when login_port is defined, login_host cannot be localhost when login_port is defined") else: @@ -322,7 +330,7 @@ def main(): errno, errstr = e.args module.fail_json(msg="ERROR: %s %s" % 
(errno, errstr)) else: - module.fail_json(msg="unable to connect, check login_user and login_password are correct, or alternatively check ~/.my.cnf contains credentials") + module.fail_json(msg="unable to connect, check login credentials (login_user, and login_password, which can be defined in ~/.my.cnf), check that mysql socket exists and mysql server is running") changed = False if db_exists(cursor, db): From eea4d068482a9d056e84a259ca712cea8ea69302 Mon Sep 17 00:00:00 2001 From: Patrik Lundin Date: Thu, 13 Nov 2014 12:39:29 +0100 Subject: [PATCH 024/236] Fix typo: Replace "N0" with "NO". --- system/service.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/service.py b/system/service.py index e8708682eb8..2d62edc67ac 100644 --- a/system/service.py +++ b/system/service.py @@ -1007,7 +1007,7 @@ class OpenBsdService(Service): # Depending on the service the string returned from 'status' may be # either a set of flags or the boolean YES/NO - if status_string == "YES" or status_string == "N0": + if status_string == "YES" or status_string == "NO": current_flags = '' else: current_flags = status_string From 4558ed69259bfc8ced6c5f76a78ec6091376eea0 Mon Sep 17 00:00:00 2001 From: Henry Finucane Date: Mon, 29 Sep 2014 18:55:34 -0700 Subject: [PATCH 025/236] dpkg does not take a --force-yes option --- packaging/os/apt.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/os/apt.py b/packaging/os/apt.py index 5c557900b76..13103c02320 100644 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -358,7 +358,7 @@ def install_deb(m, debs, cache, force, install_recommends, dpkg_options): if m.check_mode: options += " --simulate" if force: - options += " --force-yes" + options += " --force-all" cmd = "dpkg %s -i %s" % (options, " ".join(pkgs_to_install)) rc, out, err = m.run_command(cmd) From 30c7f9a7d377cd225f2e0e43b0990839867d561b Mon Sep 17 00:00:00 2001 From: Will Thames Date: Sat, 1 Nov 2014 12:44:44 +1000 Subject: [PATCH 
026/236] Added better region handling and enabled eu-central-1 Make use of improved connect_to_aws that throws an exception if a region can't be connected to (e.g. eu-central-1 requires boto 2.34 onwards) Add eu-central-1 to the two modules that hardcode their regions Add us-gov-west-1 to ec2_ami_search to match documentation! This pull request makes use of the changes in ansible/ansible#9419 --- cloud/amazon/ec2_ami_search.py | 7 +++++-- cloud/amazon/ec2_asg.py | 2 +- cloud/amazon/ec2_elb.py | 4 ++-- cloud/amazon/ec2_elb_lb.py | 2 +- cloud/amazon/ec2_facts.py | 1 + cloud/amazon/ec2_lc.py | 2 +- cloud/amazon/ec2_metric_alarm.py | 2 +- cloud/amazon/ec2_scaling_policy.py | 4 +--- 8 files changed, 13 insertions(+), 11 deletions(-) diff --git a/cloud/amazon/ec2_ami_search.py b/cloud/amazon/ec2_ami_search.py index 25875de39bd..36a0ab38f22 100644 --- a/cloud/amazon/ec2_ami_search.py +++ b/cloud/amazon/ec2_ami_search.py @@ -56,7 +56,8 @@ options: required: false default: us-east-1 choices: ["ap-northeast-1", "ap-southeast-1", "ap-southeast-2", - "eu-west-1", "sa-east-1", "us-east-1", "us-west-1", "us-west-2", "us-gov-west-1"] + "eu-central-1", "eu-west-1", "sa-east-1", "us-east-1", + "us-west-1", "us-west-2", "us-gov-west-1"] virt: description: virutalization type required: false @@ -88,11 +89,13 @@ SUPPORTED_DISTROS = ['ubuntu'] AWS_REGIONS = ['ap-northeast-1', 'ap-southeast-1', 'ap-southeast-2', + 'eu-central-1', 'eu-west-1', 'sa-east-1', 'us-east-1', 'us-west-1', - 'us-west-2'] + 'us-west-2', + "us-gov-west-1"] def get_url(module, url): diff --git a/cloud/amazon/ec2_asg.py b/cloud/amazon/ec2_asg.py index 2b060ccca37..8f08aaf874e 100644 --- a/cloud/amazon/ec2_asg.py +++ b/cloud/amazon/ec2_asg.py @@ -272,7 +272,7 @@ def create_autoscaling_group(connection, module): region, ec2_url, aws_connect_params = get_aws_connection_info(module) try: ec2_connection = connect_to_aws(boto.ec2, region, **aws_connect_params) - except boto.exception.NoAuthHandlerFound, e: + except 
(boto.exception.NoAuthHandlerFound, StandardError), e: module.fail_json(msg=str(e)) asg_tags = [] diff --git a/cloud/amazon/ec2_elb.py b/cloud/amazon/ec2_elb.py index 42cb1819025..41883de15ce 100644 --- a/cloud/amazon/ec2_elb.py +++ b/cloud/amazon/ec2_elb.py @@ -258,7 +258,7 @@ class ElbManager: try: elb = connect_to_aws(boto.ec2.elb, self.region, **self.aws_connect_params) - except boto.exception.NoAuthHandlerFound, e: + except (boto.exception.NoAuthHandlerFound, StandardError), e: self.module.fail_json(msg=str(e)) elbs = elb.get_all_load_balancers() @@ -278,7 +278,7 @@ class ElbManager: try: ec2 = connect_to_aws(boto.ec2, self.region, **self.aws_connect_params) - except boto.exception.NoAuthHandlerFound, e: + except (boto.exception.NoAuthHandlerFound, StandardError), e: self.module.fail_json(msg=str(e)) return ec2.get_only_instances(instance_ids=[self.instance_id])[0] diff --git a/cloud/amazon/ec2_elb_lb.py b/cloud/amazon/ec2_elb_lb.py index 462fbbcc797..4717e767600 100644 --- a/cloud/amazon/ec2_elb_lb.py +++ b/cloud/amazon/ec2_elb_lb.py @@ -374,7 +374,7 @@ class ElbManager(object): try: return connect_to_aws(boto.ec2.elb, self.region, **self.aws_connect_params) - except boto.exception.NoAuthHandlerFound, e: + except (boto.exception.NoAuthHandlerFound, StandardError), e: self.module.fail_json(msg=str(e)) def _delete_elb(self): diff --git a/cloud/amazon/ec2_facts.py b/cloud/amazon/ec2_facts.py index 7b5c610dc2d..c6fbf86b724 100644 --- a/cloud/amazon/ec2_facts.py +++ b/cloud/amazon/ec2_facts.py @@ -65,6 +65,7 @@ class Ec2Metadata(object): AWS_REGIONS = ('ap-northeast-1', 'ap-southeast-1', 'ap-southeast-2', + 'eu-central-1', 'eu-west-1', 'sa-east-1', 'us-east-1', diff --git a/cloud/amazon/ec2_lc.py b/cloud/amazon/ec2_lc.py index f75dfe6d938..c4b7f70b924 100644 --- a/cloud/amazon/ec2_lc.py +++ b/cloud/amazon/ec2_lc.py @@ -265,7 +265,7 @@ def main(): try: connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params) - except 
boto.exception.NoAuthHandlerFound, e: + except (boto.exception.NoAuthHandlerFound, StandardError), e: module.fail_json(msg=str(e)) state = module.params.get('state') diff --git a/cloud/amazon/ec2_metric_alarm.py b/cloud/amazon/ec2_metric_alarm.py index 519f88f24f8..7a8d573ce74 100644 --- a/cloud/amazon/ec2_metric_alarm.py +++ b/cloud/amazon/ec2_metric_alarm.py @@ -271,7 +271,7 @@ def main(): region, ec2_url, aws_connect_params = get_aws_connection_info(module) try: connection = connect_to_aws(boto.ec2.cloudwatch, region, **aws_connect_params) - except boto.exception.NoAuthHandlerFound, e: + except (boto.exception.NoAuthHandlerFound, StandardError), e: module.fail_json(msg=str(e)) if state == 'present': diff --git a/cloud/amazon/ec2_scaling_policy.py b/cloud/amazon/ec2_scaling_policy.py index ad1fa7ce7f1..8e7d459e3e3 100644 --- a/cloud/amazon/ec2_scaling_policy.py +++ b/cloud/amazon/ec2_scaling_policy.py @@ -163,9 +163,7 @@ def main(): try: connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params) - if not connection: - module.fail_json(msg="failed to connect to AWS for the given region: %s" % str(region)) - except boto.exception.NoAuthHandlerFound, e: + except (boto.exception.NoAuthHandlerFound, StandardError), e: module.fail_json(msg = str(e)) if state == 'present': From 3c8b4bd4b931105038633aabf8c81d612ff077e4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Piotr=20Gos=C5=82awski?= Date: Sun, 30 Nov 2014 12:32:08 +0100 Subject: [PATCH 027/236] Fix behavior when insert* doesn't match anything If insertbefore/insertafter didn't match anything, lineinfile module was doing nothing, instead of adding the line at end of file as it's supposed to. 
--- files/lineinfile.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/files/lineinfile.py b/files/lineinfile.py index c72b7f9d9a9..e1a7980f38a 100644 --- a/files/lineinfile.py +++ b/files/lineinfile.py @@ -256,9 +256,9 @@ def present(module, dest, regexp, line, insertafter, insertbefore, create, msg = 'line added' changed = True # Add it to the end of the file if requested or - # if insertafter=/insertbefore didn't match anything + # if insertafter/insertbefore didn't match anything # (so default behaviour is to add at the end) - elif insertafter == 'EOF': + elif insertafter == 'EOF' or index[1] == -1: # If the file is not empty then ensure there's a newline before the added line if len(lines)>0 and not (lines[-1].endswith('\n') or lines[-1].endswith('\r')): @@ -267,9 +267,6 @@ def present(module, dest, regexp, line, insertafter, insertbefore, create, lines.append(line + os.linesep) msg = 'line added' changed = True - # Do nothing if insert* didn't match - elif index[1] == -1: - pass # insert* matched, but not the regexp else: lines.insert(index[1], line + os.linesep) From f5789e8eda59a475e81a8e5a2494363c94c8db1f Mon Sep 17 00:00:00 2001 From: Philip Misiowiec Date: Fri, 26 Sep 2014 23:03:22 -0700 Subject: [PATCH 028/236] Support for EC2 dedicated tenancy option --- cloud/amazon/ec2.py | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index 04e419ea1f1..8d25c3196a8 100644 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -67,6 +67,12 @@ options: required: true default: null aliases: [] + tenancy: + description: + - An instance with a tenancy of "dedicated" runs on single-tenant hardware and can only be launched into a VPC. Valid values are:"default" or "dedicated". NOTE: To use dedicated tenancy you MUST specify a vpc_subnet_id as well. Dedicated tenancy is not available for EC2 "micro" instances. 
+ required: false + default: default + aliases: [] spot_price: version_added: "1.5" description: @@ -312,6 +318,18 @@ local_action: vpc_subnet_id: subnet-29e63245 assign_public_ip: yes +# Dedicated tenancy example +- local_action: + module: ec2 + assign_public_ip: yes + group_id: sg-1dc53f72 + key_name: mykey + image: ami-6e649707 + instance_type: m1.small + tenancy: dedicated + vpc_subnet_id: subnet-29e63245 + wait: yes + # Spot instance example - local_action: module: ec2 @@ -728,6 +746,7 @@ def create_instances(module, ec2, override_count=None): group_id = module.params.get('group_id') zone = module.params.get('zone') instance_type = module.params.get('instance_type') + tenancy = module.params.get('tenancy') spot_price = module.params.get('spot_price') image = module.params.get('image') if override_count: @@ -811,6 +830,9 @@ def create_instances(module, ec2, override_count=None): if ebs_optimized: params['ebs_optimized'] = ebs_optimized + + if tenancy: + params['tenancy'] = tenancy if boto_supports_profile_name_arg(ec2): params['instance_profile_name'] = instance_profile_name @@ -1153,6 +1175,7 @@ def main(): count_tag = dict(), volumes = dict(type='list'), ebs_optimized = dict(type='bool', default=False), + tenancy = dict(default='default'), ) ) From 59701feadba064e5880a6b31ec3c019604118976 Mon Sep 17 00:00:00 2001 From: Philip Misiowiec Date: Mon, 29 Sep 2014 17:48:12 -0500 Subject: [PATCH 029/236] added version --- cloud/amazon/ec2.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index 8d25c3196a8..050ed0b63f4 100644 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -68,6 +68,7 @@ options: default: null aliases: [] tenancy: + version_added: "1.8" description: - An instance with a tenancy of "dedicated" runs on single-tenant hardware and can only be launched into a VPC. Valid values are:"default" or "dedicated". NOTE: To use dedicated tenancy you MUST specify a vpc_subnet_id as well. 
Dedicated tenancy is not available for EC2 "micro" instances. required: false From 7a8586c53c9482d54db037a521a4a20ed7369daf Mon Sep 17 00:00:00 2001 From: Will Thames Date: Tue, 26 Aug 2014 22:54:57 +1000 Subject: [PATCH 030/236] RDS rewrite to use boto.rds and boto.rds2 Using rds2 allows tags and the control over whether or not DBs are publicly accessible. Move RDS towards a pair of interfaces implementing the details of rds and rds2 Added tests to ensure that all operations work correctly as well as requirements files that allow virtualenvs to test either boto.rds or boto.rds2 --- cloud/amazon/rds.py | 958 +++++++++++++++++++++++++++++--------------- 1 file changed, 633 insertions(+), 325 deletions(-) diff --git a/cloud/amazon/rds.py b/cloud/amazon/rds.py index d6fd1622161..5e20e3470dc 100644 --- a/cloud/amazon/rds.py +++ b/cloud/amazon/rds.py @@ -20,7 +20,7 @@ module: rds version_added: "1.3" short_description: create, delete, or modify an Amazon rds instance description: - - Creates, deletes, or modifies rds instances. When creating an instance it can be either a new instance or a read-only replica of an existing instance. This module has a dependency on python-boto >= 2.5. The 'promote' command requires boto >= 2.18.0. + - Creates, deletes, or modifies rds instances. When creating an instance it can be either a new instance or a read-only replica of an existing instance. This module has a dependency on python-boto >= 2.5. The 'promote' command requires boto >= 2.18.0. Certain features such as tags rely on boto.rds2 (boto >= 2.26.0) options: command: description: @@ -31,8 +31,8 @@ options: choices: [ 'create', 'replicate', 'delete', 'facts', 'modify' , 'promote', 'snapshot', 'restore' ] instance_name: description: - - Database instance identifier. - required: true + - Database instance identifier. 
Required except when using command=facts or command=delete on just a snapshot + required: false default: null aliases: [] source_instance: @@ -179,7 +179,7 @@ options: aliases: [] snapshot: description: - - Name of snapshot to take. When command=delete, if no snapshot name is provided then no snapshot is taken. Used only when command=delete or command=snapshot. + - Name of snapshot to take. When command=delete, if no snapshot name is provided then no snapshot is taken. If used with command=delete with no instance_name, the snapshot is deleted. Used with command=facts, command=delete or command=snapshot. required: false default: null aliases: [] @@ -220,8 +220,29 @@ options: default: null aliases: [] version_added: 1.5 + character_set_name: + description: + - Associate the DB instance with a specified character set. Used with command=create. + required: false + default: null + aliases: [] + version_added: 1.8 + publicly_accessible: + description: + - explicitly set whether the resource should be publicly accessible or not. Used with command=create, command=replicate. Requires boto >= 2.26.0 + required: false + default: null + aliases: [] + version_added: 1.8 + tags: + description: + - tags to apply to a resource. Used with command=create, command=replicate, command=restore. 
Requires boto >= 2.26.0 + required: false + default: null + aliases: [] + version_added: 1.8 requirements: [ "boto" ] -author: Bruce Pennypacker +author: Bruce Pennypacker, Will Thames ''' # FIXME: the command stuff needs a 'state' like alias to make things consistent -- MPD @@ -274,376 +295,663 @@ except ImportError: print "failed=True msg='boto required for this module'" sys.exit(1) -def get_current_resource(conn, resource, command): - # There will be exceptions but we want the calling code to handle them - if command == 'snapshot': - return conn.get_all_dbsnapshots(snapshot_id=resource)[0] - else: - return conn.get_all_dbinstances(resource)[0] - - -def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( - command = dict(choices=['create', 'replicate', 'delete', 'facts', 'modify', 'promote', 'snapshot', 'restore'], required=True), - instance_name = dict(required=True), - source_instance = dict(required=False), - db_engine = dict(choices=['MySQL', 'oracle-se1', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres'], required=False), - size = dict(required=False), - instance_type = dict(aliases=['type'], required=False), - username = dict(required=False), - password = dict(no_log=True, required=False), - db_name = dict(required=False), - engine_version = dict(required=False), - parameter_group = dict(required=False), - license_model = dict(choices=['license-included', 'bring-your-own-license', 'general-public-license'], required=False), - multi_zone = dict(type='bool', default=False), - iops = dict(required=False), - security_groups = dict(required=False), - vpc_security_groups = dict(type='list', required=False), - port = dict(required=False), - upgrade = dict(type='bool', default=False), - option_group = dict(required=False), - maint_window = dict(required=False), - backup_window = dict(required=False), - backup_retention = dict(required=False), - zone = dict(aliases=['aws_zone', 'ec2_zone'], 
required=False), - subnet = dict(required=False), - wait = dict(type='bool', default=False), - wait_timeout = dict(default=300), - snapshot = dict(required=False), - apply_immediately = dict(type='bool', default=False), - new_instance_name = dict(required=False), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - ) - - command = module.params.get('command') - instance_name = module.params.get('instance_name') - source_instance = module.params.get('source_instance') - db_engine = module.params.get('db_engine') - size = module.params.get('size') - instance_type = module.params.get('instance_type') - username = module.params.get('username') - password = module.params.get('password') - db_name = module.params.get('db_name') - engine_version = module.params.get('engine_version') - parameter_group = module.params.get('parameter_group') - license_model = module.params.get('license_model') - multi_zone = module.params.get('multi_zone') - iops = module.params.get('iops') - security_groups = module.params.get('security_groups') - vpc_security_groups = module.params.get('vpc_security_groups') - port = module.params.get('port') - upgrade = module.params.get('upgrade') - option_group = module.params.get('option_group') - maint_window = module.params.get('maint_window') - subnet = module.params.get('subnet') - backup_window = module.params.get('backup_window') - backup_retention = module.params.get('backup_retention') - region = module.params.get('region') - zone = module.params.get('zone') - aws_secret_key = module.params.get('aws_secret_key') - aws_access_key = module.params.get('aws_access_key') - wait = module.params.get('wait') - wait_timeout = int(module.params.get('wait_timeout')) - snapshot = module.params.get('snapshot') - apply_immediately = module.params.get('apply_immediately') - new_instance_name = module.params.get('new_instance_name') +try: + import boto.rds2 + has_rds2 = True +except ImportError: + has_rds2 = False - region, ec2_url, 
aws_connect_params = get_aws_connection_info(module) - if not region: - module.fail_json(msg = str("region not specified and unable to determine region from EC2_REGION.")) - # connect to the rds endpoint - try: - conn = connect_to_aws(boto.rds, region, **aws_connect_params) - except boto.exception.BotoServerError, e: - module.fail_json(msg = e.error_message) +class RDSConnection: + def __init__(self, module, region, **aws_connect_params): + try: + self.connection = connect_to_aws(boto.rds, region, **aws_connect_params) + except boto.exception.BotoServerError, e: + module.fail_json(msg=e.error_message) - def invalid_security_group_type(subnet): - if subnet: - return 'security_groups' - else: - return 'vpc_security_groups' + def get_db_instance(self, instancename): + try: + return RDSDBInstance(self.connection.get_all_dbinstances(instancename)[0]) + except boto.exception.BotoServerError,e: + return None - # Package up the optional parameters - params = {} + def get_db_snapshot(self, snapshotid): + try: + return RDSSnapshot(self.connection.get_all_dbsnapshots(snapshot_id=snapshotid)[0]) + except boto.exception.BotoServerError,e: + return None - # Validate parameters for each command - if command == 'create': - required_vars = [ 'instance_name', 'db_engine', 'size', 'instance_type', 'username', 'password' ] - invalid_vars = [ 'source_instance', 'snapshot', 'apply_immediately', 'new_instance_name' ] + [invalid_security_group_type(subnet)] + def create_db_instance(self, instance_name, size, instance_class, db_engine, + username, password, **params): + params['engine'] = db_engine + result = self.connection.create_dbinstance(instance_name, size, instance_class, + username, password, **params) + return RDSDBInstance(result) - elif command == 'replicate': - required_vars = [ 'instance_name', 'source_instance' ] - invalid_vars = [ 'db_engine', 'size', 'username', 'password', 'db_name', 'engine_version', 'parameter_group', 'license_model', 'multi_zone', 'iops', 
'vpc_security_groups', 'security_groups', 'option_group', 'maint_window', 'backup_window', 'backup_retention', 'subnet', 'snapshot', 'apply_immediately', 'new_instance_name' ] + def create_db_instance_read_replica(self, instance_name, source_instance, **params): + result = self.connection.createdb_instance_read_replica(instance_name, source_instance, **params) + return RDSDBInstance(result) - elif command == 'delete': - required_vars = [ 'instance_name' ] - invalid_vars = [ 'db_engine', 'size', 'instance_type', 'username', 'password', 'db_name', 'engine_version', 'parameter_group', 'license_model', 'multi_zone', 'iops', 'vpc_security_groups' ,'security_groups', 'option_group', 'maint_window', 'backup_window', 'backup_retention', 'port', 'upgrade', 'subnet', 'zone' , 'source_instance', 'apply_immediately', 'new_instance_name' ] + def delete_db_instance(self, instance_name, **params): + result = self.connection.delete_dbinstance(instance_name, **params) + return RDSDBInstance(result) - elif command == 'facts': - required_vars = [ 'instance_name' ] - invalid_vars = [ 'db_engine', 'size', 'instance_type', 'username', 'password', 'db_name', 'engine_version', 'parameter_group', 'license_model', 'multi_zone', 'iops', 'vpc_security_groups', 'security_groups', 'option_group', 'maint_window', 'backup_window', 'backup_retention', 'port', 'upgrade', 'subnet', 'zone', 'wait', 'source_instance' 'apply_immediately', 'new_instance_name' ] + def delete_db_snapshot(self, snapshot): + result = self.connection.delete_dbsnapshot(snapshot) + return RDSSnapshot(result) - elif command == 'modify': - required_vars = [ 'instance_name' ] - if password: - params["master_password"] = password - invalid_vars = [ 'db_engine', 'username', 'db_name', 'engine_version', 'license_model', 'option_group', 'port', 'upgrade', 'subnet', 'zone', 'source_instance'] + def modify_db_instance(self, instance_name, **params): + result = self.connection.modify_dbinstance(instance_name, **params) + return 
RDSDBInstance(result) - elif command == 'promote': - required_vars = [ 'instance_name' ] - invalid_vars = [ 'db_engine', 'size', 'username', 'password', 'db_name', 'engine_version', 'parameter_group', 'license_model', 'multi_zone', 'iops', 'vpc_security_groups', 'security_groups', 'option_group', 'maint_window', 'subnet', 'source_instance', 'snapshot', 'apply_immediately', 'new_instance_name' ] - - elif command == 'snapshot': - required_vars = [ 'instance_name', 'snapshot'] - invalid_vars = [ 'db_engine', 'size', 'username', 'password', 'db_name', 'engine_version', 'parameter_group', 'license_model', 'multi_zone', 'iops', 'vpc_security_groups', 'security_groups', 'option_group', 'maint_window', 'subnet', 'source_instance', 'apply_immediately', 'new_instance_name' ] - - elif command == 'restore': - required_vars = [ 'instance_name', 'snapshot', 'instance_type' ] - invalid_vars = [ 'db_engine', 'db_name', 'username', 'password', 'engine_version', 'option_group', 'source_instance', 'apply_immediately', 'new_instance_name', 'vpc_security_groups', 'security_groups' ] - - for v in required_vars: - if not module.params.get(v): - module.fail_json(msg = str("Parameter %s required for %s command" % (v, command))) - - for v in invalid_vars: - if module.params.get(v): - module.fail_json(msg = str("Parameter %s invalid for %s command" % (v, command))) + def restore_db_instance_from_db_snapshot(self, instance_name, snapshot, instance_type, **params): + result = self.connection.restore_dbinstance_from_dbsnapshot(snapshot, instance_name, instance_type, **params) + return RDSDBInstance(result) - if db_engine: - params["engine"] = db_engine + def create_db_snapshot(self, snapshot, instance_name, **params): + result = self.connection.create_dbsnapshot(snapshot, instance_name) + return RDSSnapshot(result) - if port: - params["port"] = port + def promote_read_replica(self, instance_name, **params): + result = self.connection.promote_read_replica(instance_name, **params) + return 
RDSInstance(result) - if db_name: - params["db_name"] = db_name - if parameter_group: - params["param_group"] = parameter_group +class RDS2Connection: + def __init__(self, module, region, **aws_connect_params): + try: + self.connection = connect_to_aws(boto.rds2, region, **aws_connect_params) + except boto.exception.BotoServerError, e: + module.fail_json(msg=e.error_message) - if zone: - params["availability_zone"] = zone - - if maint_window: - params["preferred_maintenance_window"] = maint_window + def get_db_instance(self, instancename): + try: + dbinstances = self.connection.describe_db_instances(db_instance_identifier=instancename)['DescribeDBInstancesResponse']['DescribeDBInstancesResult']['DBInstances'] + result = RDS2DBInstance(dbinstances[0]) + return result + except boto.rds2.exceptions.DBInstanceNotFound, e: + return None + except Exception, e: + raise e + + def get_db_snapshot(self, snapshotid): + try: + snapshots = self.connection.describe_db_snapshots(db_snapshot_identifier=snapshotid, snapshot_type='manual')['DescribeDBSnapshotsResponse']['DescribeDBSnapshotsResult']['DBSnapshots'] + result = RDS2Snapshot(snapshots[0]) + return result + except boto.rds2.exceptions.DBSnapshotNotFound, e: + return None + + def create_db_instance(self, instance_name, size, instance_class, db_engine, + username, password, **params): + result = self.connection.create_db_instance(instance_name, size, instance_class, + db_engine, username, password, **params)['CreateDBInstanceResponse']['CreateDBInstanceResult']['DBInstance'] + return RDS2DBInstance(result) + + def create_db_instance_read_replica(self, instance_name, source_instance, **params): + result = self.connection.create_db_instance_read_replica(instance_name, source_instance, **params)['CreateDBInstanceReadReplicaResponse']['CreateDBInstanceReadReplicaResult']['DBInstance'] + return RDS2DBInstance(result) + + def delete_db_instance(self, instance_name, **params): + result = 
self.connection.delete_db_instance(instance_name, **params)['DeleteDBInstanceResponse']['DeleteDBInstanceResult']['DBInstance'] + return RDS2DBInstance(result) + + def delete_db_snapshot(self, snapshot): + result = self.connection.delete_db_snapshot(snapshot)['DeleteDBSnapshotResponse']['DeleteDBSnapshotResult']['DBSnapshot'] + return RDS2Snapshot(result) + + def modify_db_instance(self, instance_name, **params): + result = self.connection.modify_db_instance(instance_name, **params)['ModifyDBInstanceResponse']['ModifyDBInstanceResult']['DBInstance'] + return RDS2DBInstance(result) + + def restore_db_instance_from_db_snapshot(self, instance_name, snapshot, instance_type, **params): + result = self.connection.restore_db_instance_from_db_snapshot(instance_name, snapshot, **params)['RestoreDBInstanceFromDBSnapshotResponse']['RestoreDBInstanceFromDBSnapshotResult']['DBInstance'] + return RDS2DBInstance(result) + + def create_db_snapshot(self, snapshot, instance_name, **params): + result = self.connection.create_db_snapshot(snapshot, instance_name, **params)['CreateDBSnapshotResponse']['CreateDBSnapshotResult']['DBSnapshot'] + return RDS2Snapshot(result) + + def promote_read_replica(self, instance_name, **params): + result = self.connection.promote_read_replica(instance_name, **params)['PromoteReadReplicaResponse']['PromoteReadReplicaResult']['DBInstance'] + return RDS2DBInstance(result) + + +class RDSDBInstance: + def __init__(self, dbinstance): + self.instance = dbinstance + self.name = dbinstance.id + self.status = dbinstance.status + + def get_data(self): + d = { + 'id' : self.name, + 'create_time' : self.instance.create_time, + 'status' : self.status, + 'availability_zone' : self.instance.availability_zone, + 'backup_retention' : self.instance.backup_retention_period, + 'backup_window' : self.instance.preferred_backup_window, + 'maintenance_window' : self.instance.preferred_maintenance_window, + 'multi_zone' : self.instance.multi_az, + 'instance_type' : 
self.instance.instance_class, + 'username' : self.instance.master_username, + 'iops' : self.instance.iops + } + + # Endpoint exists only if the instance is available + if self.status == 'available': + d["endpoint"] = self.instance.endpoint[0] + d["port"] = self.instance.endpoint[1] + if self.instance.vpc_security_groups is not None: + d["vpc_security_groups"] = ','.join(x.vpc_group for x in self.instance.vpc_security_groups) + else: + d["vpc_security_groups"] = None + else: + d["endpoint"] = None + d["port"] = None + d["vpc_security_groups"] = None - if backup_window: - params["preferred_backup_window"] = backup_window + # ReadReplicaSourceDBInstanceIdentifier may or may not exist + try: + d["replication_source"] = self.instance.ReadReplicaSourceDBInstanceIdentifier + except Exception, e: + d["replication_source"] = None + return d - if backup_retention: - params["backup_retention_period"] = backup_retention - if multi_zone: - params["multi_az"] = multi_zone - if engine_version: - params["engine_version"] = engine_version - if upgrade: - params["auto_minor_version_upgrade"] = upgrade +class RDS2DBInstance: + def __init__(self, dbinstance): + self.instance = dbinstance + if 'DBInstanceIdentifier' not in dbinstance: + self.name = None + else: + self.name = self.instance.get('DBInstanceIdentifier') + self.status = self.instance.get('DBInstanceStatus') + + def get_data(self): + d = { + 'id': self.name, + 'create_time': self.instance['InstanceCreateTime'], + 'status': self.status, + 'availability_zone': self.instance['AvailabilityZone'], + 'backup_retention': self.instance['BackupRetentionPeriod'], + 'maintenance_window': self.instance['PreferredMaintenanceWindow'], + 'multi_zone': self.instance['MultiAZ'], + 'instance_type': self.instance['DBInstanceClass'], + 'username': self.instance['MasterUsername'], + 'iops': self.instance['Iops'], + 'replication_source': self.instance['ReadReplicaSourceDBInstanceIdentifier'] + } + if self.instance["VpcSecurityGroups"] is not 
None: + d['vpc_security_groups'] = ','.join(x['VpcSecurityGroupId'] for x in self.instance['VpcSecurityGroups']) + if self.status == 'available': + d['endpoint'] = self.instance["Endpoint"]["Address"] + d['port'] = self.instance["Endpoint"]["Port"] + else: + d['endpoint'] = None + d['port'] = None + + return d + + +class RDSSnapshot: + def __init__(self, snapshot): + self.snapshot = snapshot + self.name = snapshot.id + self.status = snapshot.status + + def get_data(self): + d = { + 'id' : self.name, + 'create_time' : self.snapshot.snapshot_create_time, + 'status' : self.status, + 'availability_zone' : self.snapshot.availability_zone, + 'instance_id' : self.snapshot.instance_id, + 'instance_created' : self.snapshot.instance_create_time, + } + # needs boto >= 2.21.0 + if hasattr(self.snapshot, 'snapshot_type'): + d["snapshot_type"] = self.snapshot.snapshot_type + if hasattr(self.snapshot, 'iops'): + d["iops"] = self.snapshot.iops + return d + + +class RDS2Snapshot: + def __init__(self, snapshot): + if 'DeleteDBSnapshotResponse' in snapshot: + self.snapshot = snapshot['DeleteDBSnapshotResponse']['DeleteDBSnapshotResult']['DBSnapshot'] + else: + self.snapshot = snapshot + self.name = self.snapshot.get('DBSnapshotIdentifier') + self.status = self.snapshot.get('Status') + + def get_data(self): + d = { + 'id' : self.name, + 'create_time' : self.snapshot['SnapshotCreateTime'], + 'status' : self.status, + 'availability_zone' : self.snapshot['AvailabilityZone'], + 'instance_id' : self.snapshot['DBInstanceIdentifier'], + 'instance_created' : self.snapshot['InstanceCreateTime'], + 'snapshot_type' : self.snapshot['SnapshotType'], + 'iops' : self.snapshot['Iops'], + } + return d + + +def await_resource(conn, resource, status, module): + wait_timeout = module.params.get('wait_timeout') + time.time() + while wait_timeout > time.time() and resource.status != status: + time.sleep(5) + if wait_timeout <= time.time(): + module.fail_json(msg="Timeout waiting for resource %s" % 
resource.id) + if module.params.get('command') == 'snapshot': + # Temporary until all the rds2 commands have their responses parsed + if resource.name is None: + module.fail_json(msg="Problem with snapshot %s" % resource.snapshot) + resource = conn.get_db_snapshot(resource.name) + else: + # Temporary until all the rds2 commands have their responses parsed + if resource.name is None: + module.fail_json(msg="Problem with instance %s" % resource.instance) + resource = conn.get_db_instance(resource.name) + return resource + + +def create_db_instance(module, conn): + subnet = module.params.get('subnet') + required_vars = ['instance_name', 'db_engine', 'size', 'instance_type', 'username', 'password'] + valid_vars = ['backup_retention', 'backup_window', + 'character_set_name', 'db_name', 'engine_version', + 'instance_type', 'iops', 'license_model', 'maint_window', + 'multi_zone', 'option_group', 'parameter_group','port', + 'subnet', 'upgrade', 'zone'] + if module.params.get('subnet'): + valid_vars.append('vpc_security_groups') + else: + valid_vars.append('security_groups') + if has_rds2: + valid_vars.extend(['publicly_accessible', 'tags']) + params = validate_parameters(required_vars, valid_vars, module) + instance_name = module.params.get('instance_name') + + result = conn.get_db_instance(instance_name) + if result: + changed = False + else: + try: + result = conn.create_db_instance(instance_name, module.params.get('size'), + module.params.get('instance_type'), module.params.get('db_engine'), + module.params.get('username'), module.params.get('password'), **params) + changed = True + except boto.exception.StandardError, e: + module.fail_json(msg=e.error_message) + + if module.params.get('wait'): + resource = await_resource(conn, result, 'available', module) + else: + resource = conn.get_db_instance(instance_name) - if subnet: - params["db_subnet_group_name"] = subnet + module.exit_json(changed=changed, instance=resource.get_data()) - if license_model: - 
params["license_model"] = license_model - if option_group: - params["option_group_name"] = option_group +def replicate_db_instance(module, conn): + required_vars = ['instance_name', 'source_instance'] + valid_vars = ['instance_type', 'port', 'upgrade', 'zone'] + if has_rds2: + valid_vars.extend(['iops', 'option_group', 'publicly_accessible', 'tags']) + params = validate_parameters(required_vars, valid_vars, module) + instance_name = module.params.get('instance_name') + source_instance = module.params.get('source_instance') - if iops: - params["iops"] = iops + result = conn.get_db_instance(instance_name) + if result: + changed = False + else: + try: + result = conn.create_db_instance_read_replica(instance_name, source_instance, **params) + changed = True + except boto.exception.StandardError, e: + module.fail_json(msg=e.error_message) - if security_groups: - params["security_groups"] = security_groups.split(',') + if module.params.get('wait'): + resource = await_resource(conn, result, 'available', module) + else: + resource = conn.get_db_instance(instance_name) - if vpc_security_groups: - groups_list = [] - for x in vpc_security_groups: - groups_list.append(boto.rds.VPCSecurityGroupMembership(vpc_group=x)) - params["vpc_security_groups"] = groups_list + module.exit_json(changed=changed, instance=resource.get_data()) - if new_instance_name: - params["new_instance_id"] = new_instance_name - changed = True +def delete_db_instance_or_snapshot(module, conn): + required_vars = [] + valid_vars = ['instance_name', 'snapshot', 'skip_final_snapshot'] + params = validate_parameters(required_vars, valid_vars, module) + instance_name = module.params.get('instance_name') + snapshot = module.params.get('snapshot') - if command in ['create', 'restore', 'facts']: - try: - result = conn.get_all_dbinstances(instance_name)[0] - changed = False - except boto.exception.BotoServerError, e: - try: - if command == 'create': - result = conn.create_dbinstance(instance_name, size, 
instance_type, username, password, **params) - if command == 'restore': - result = conn.restore_dbinstance_from_dbsnapshot(snapshot, instance_name, instance_type, **params) - if command == 'facts': - module.fail_json(msg = "DB Instance %s does not exist" % instance_name) - except boto.exception.BotoServerError, e: - module.fail_json(msg = e.error_message) - - if command == 'snapshot': - try: - result = conn.get_all_dbsnapshots(snapshot)[0] - changed = False - except boto.exception.BotoServerError, e: - try: - result = conn.create_dbsnapshot(snapshot, instance_name) - except boto.exception.BotoServerError, e: - module.fail_json(msg = e.error_message) - - if command == 'delete': - try: - result = conn.get_all_dbinstances(instance_name)[0] - if result.status == 'deleting': - module.exit_json(changed=False) - except boto.exception.BotoServerError, e: - module.exit_json(changed=False) - try: + if not instance_name: + result = conn.get_db_snapshot(snapshot) + else: + result = conn.get_db_instance(instance_name) + if not result: + module.exit_json(changed=False) + if result.status == 'deleting': + module.exit_json(changed=False) + try: + if instance_name: if snapshot: params["skip_final_snapshot"] = False params["final_snapshot_id"] = snapshot else: params["skip_final_snapshot"] = True - result = conn.delete_dbinstance(instance_name, **params) - except boto.exception.BotoServerError, e: - module.fail_json(msg = e.error_message) + result = conn.delete_db_instance(instance_name, **params) + else: + result = conn.delete_db_snapshot(snapshot) + except boto.exception.StandardError, e: + module.fail_json(msg=e.error_message) - if command == 'replicate': - try: - if instance_type: - params["instance_class"] = instance_type - result = conn.create_dbinstance_read_replica(instance_name, source_instance, **params) - except boto.exception.BotoServerError, e: - module.fail_json(msg = e.error_message) + # If we're not waiting for a delete to complete then we're all done + # so just 
return + if not module.params.get('wait'): + module.exit_json(changed=True) + try: + resource = await_resource(conn, result, 'deleted', module) + module.exit_json(changed=True) + except boto.exception.StandardError, e: + if e.error_code == 'DBInstanceNotFound': + module.exit_json(changed=True) + else: + module.fail_json(msg=e.error_message) + except Exception, e: + module.fail_json(msg=str(e)) + + +def facts_db_instance_or_snapshot(module, conn): + required_vars = [] + valid_vars = ['instance_name', 'snapshot'] + params = validate_parameters(required_vars, valid_vars, module) + instance_name = module.params.get('instance_name') + snapshot = module.params.get('snapshot') + + if instance_name and snapshot: + module.fail_json(msg="facts must be called with either instance_name or snapshot, not both") + if instance_name: + resource = conn.get_db_instance(instance_name) + if not resource: + module.fail_json(msg="DB Instance %s does not exist" % instance_name) + if snapshot: + resource = conn.get_db_snapshot(snapshot) + if not resource: + module.fail_json(msg="DB snapshot %s does not exist" % snapshot) + + module.exit_json(changed=False, instance=resource.get_data()) + + +def modify_db_instance(module, conn): + required_vars = ['instance_name'] + valid_vars = ['backup_retention', 'backup_window', 'db_name', 'engine_version', + 'instance_type', 'iops', 'license_model', 'maint_window', + 'password', 'multi_zone', 'new_instance_name', + 'option_group', 'parameter_group', + 'size', 'upgrade'] + + params = validate_parameters(required_vars, valid_vars, module) + instance_name = module.params.get('instance_name') + new_instance_name = module.params.get('new_instance_name') - if command == 'modify': - try: - params["apply_immediately"] = apply_immediately - result = conn.modify_dbinstance(instance_name, **params) - except boto.exception.BotoServerError, e: - module.fail_json(msg = e.error_message) - if apply_immediately: - if new_instance_name: - # Wait until the new instance 
name is valid - found = 0 - while found == 0: + try: + result = conn.modify_db_instance(instance_name, **params) + except boto.exception.StandardError, e: + module.fail_json(msg=e.error_message) + if params.get('apply_immediately'): + if new_instance_name: + # Wait until the new instance name is valid + found = 0 + while found == 0: + if has_rds2: + instances = conn.describe_all_db_instances() + else: instances = conn.get_all_dbinstances() - for i in instances: - if i.id == new_instance_name: - instance_name = new_instance_name - found = 1 - if found == 0: - time.sleep(5) - - # The name of the database has now changed, so we have - # to force result to contain the new instance, otherwise - # the call below to get_current_resource will fail since it - # will be looking for the old instance name. - result.id = new_instance_name - else: - # Wait for a few seconds since it takes a while for AWS - # to change the instance from 'available' to 'modifying' - time.sleep(5) + for i in instances: + if i.id == new_instance_name: + instance_name = new_instance_name + found = 1 + if found == 0: + time.sleep(5) + + # The name of the database has now changed, so we have + # to force result to contain the new instance, otherwise + # the call below to get_current_resource will fail since it + # will be looking for the old instance name. 
+ result.id = new_instance_name + else: + # Wait for a few seconds since it takes a while for AWS + # to change the instance from 'available' to 'modifying' + time.sleep(5) + + if module.params.get('wait'): + resource = await_resource(conn, result, 'available', module) + else: + resource = conn.get_db_instance(instance_name) + + # guess that this changed the DB, need a way to check + module.exit_json(changed=True, instance=resource.get_data()) - if command == 'promote': + +def promote_db_instance(module, conn): + required_vars = ['instance_name'] + valid_vars = ['backup_retention', 'backup_window'] + params = validate_parameters(required_vars, valid_vars, module) + instance_name = module.params.get('instance_name') + + result = conn.get_db_instance(instance_name) + if result.get_data().get('replication_source'): + changed = False + else: try: result = conn.promote_read_replica(instance_name, **params) - except boto.exception.BotoServerError, e: - module.fail_json(msg = e.error_message) + except boto.exception.StandardError, e: + module.fail_json(msg=e.error_message) - # If we're not waiting for a delete to complete then we're all done - # so just return - if command == 'delete' and not wait: - module.exit_json(changed=True) + if module.params.get('wait'): + resource = await_resource(conn, result, 'available', module) + else: + resource = conn.get_db_instance(instance_name) - try: - resource = get_current_resource(conn, result.id, command) - except boto.exception.BotoServerError, e: - module.fail_json(msg = e.error_message) + module.exit_json(changed=changed, instance=resource.get_data()) - # Wait for the resource to be available if requested - if wait: - try: - wait_timeout = time.time() + wait_timeout - time.sleep(5) - while wait_timeout > time.time() and resource.status != 'available': - time.sleep(5) - if wait_timeout <= time.time(): - module.fail_json(msg = "Timeout waiting for resource %s" % resource.id) - resource = get_current_resource(conn, result.id, 
command) - except boto.exception.BotoServerError, e: - # If we're waiting for an instance to be deleted then - # get_all_dbinstances will eventually throw a - # DBInstanceNotFound error. - if command == 'delete' and e.error_code == 'DBInstanceNotFound': - module.exit_json(changed=True) - else: - module.fail_json(msg = e.error_message) - - # If we got here then pack up all the instance details to send - # back to ansible - if command == 'snapshot': - d = { - 'id' : resource.id, - 'create_time' : resource.snapshot_create_time, - 'status' : resource.status, - 'availability_zone' : resource.availability_zone, - 'instance_id' : resource.instance_id, - 'instance_created' : resource.instance_create_time, - } +def snapshot_db_instance(module, conn): + required_vars = ['instance_name', 'snapshot'] + valid_vars = ['tags'] + params = validate_parameters(required_vars, valid_vars, module) + instance_name = module.params.get('instance_name') + snapshot = module.params.get('snapshot') + changed = False + result = conn.get_db_snapshot(snapshot) + if not result: try: - d["snapshot_type"] = resource.snapshot_type - d["iops"] = resource.iops - except AttributeError, e: - pass # needs boto >= 2.21.0 - - return module.exit_json(changed=changed, snapshot=d) - - d = { - 'id' : resource.id, - 'create_time' : resource.create_time, - 'status' : resource.status, - 'availability_zone' : resource.availability_zone, - 'backup_retention' : resource.backup_retention_period, - 'backup_window' : resource.preferred_backup_window, - 'maintenance_window' : resource.preferred_maintenance_window, - 'multi_zone' : resource.multi_az, - 'instance_type' : resource.instance_class, - 'username' : resource.master_username, - 'iops' : resource.iops - } + result = conn.create_db_snapshot(snapshot, instance_name, **params) + changed = True + except boto.exception.StandardError, e: + module.fail_json(msg=e.error_message) - # Endpoint exists only if the instance is available - if resource.status == 'available' and 
command != 'snapshot': - d["endpoint"] = resource.endpoint[0] - d["port"] = resource.endpoint[1] - if resource.vpc_security_groups is not None: - d["vpc_security_groups"] = ','.join(x.vpc_group for x in resource.vpc_security_groups) - else: - d["vpc_security_groups"] = None + if module.params.get('wait'): + resource = await_resource(conn, result, 'available', module) else: - d["endpoint"] = None - d["port"] = None - d["vpc_security_groups"] = None + resource = conn.get_db_snapshot(snapshot) - # ReadReplicaSourceDBInstanceIdentifier may or may not exist - try: - d["replication_source"] = resource.ReadReplicaSourceDBInstanceIdentifier - except Exception, e: - d["replication_source"] = None + module.exit_json(changed=changed, snapshot=resource.get_data()) - module.exit_json(changed=changed, instance=d) +def restore_db_instance(module, conn): + required_vars = ['instance_name', 'snapshot'] + valid_vars = ['db_name', 'iops', 'license_model', 'multi_zone', + 'option_group', 'port', 'publicly_accessible', + 'subnet', 'tags', 'upgrade', 'zone'] + if has_rds2: + valid_vars.append('instance_type') + else: + required_vars.append('instance_type') + params = validate_parameters(required_vars, valid_vars, module) + instance_name = module.params.get('instance_name') + instance_type = module.params.get('instance_type') + snapshot = module.params.get('snapshot') + + changed = False + result = conn.get_db_instance(instance_name) + if not result: + try: + result = conn.restore_db_instance_from_db_snapshot(instance_name, snapshot, instance_type, **params) + changed = True + except boto.exception.StandardError, e: + module.fail_json(msg=e.error_message) + + if module.params.get('wait'): + resource = await_resource(conn, result, 'available', module) + else: + resource = conn.get_db_instance(instance_name) + + module.exit_json(changed=changed, instance=resource.get_data()) + + +def validate_parameters(required_vars, valid_vars, module): + command = module.params.get('command') + for v in 
required_vars: + if not module.params.get(v): + module.fail_json(msg="Parameter %s required for %s command" % (v, command)) + + # map to convert rds module options to boto rds and rds2 options + optional_params = { + 'port': 'port', + 'db_name': 'db_name', + 'zone': 'availability_zone', + 'maint_window': 'preferred_maintenance_window', + 'backup_window': 'preferred_backup_window', + 'backup_retention': 'backup_retention_period', + 'multi_zone': 'multi_az', + 'engine_version': 'engine_version', + 'upgrade': 'auto_minor_version_upgrade', + 'subnet': 'db_subnet_group_name', + 'license_model': 'license_model', + 'option_group': 'option_group_name', + 'iops': 'iops', + 'new_instance_name': 'new_instance_id', + 'apply_immediately': 'apply_immediately', + } + # map to convert rds module options to boto rds options + optional_params_rds = { + 'db_engine': 'engine', + 'password': 'master_password', + 'parameter_group': 'param_group', + 'instance_type': 'instance_class', + } + # map to convert rds module options to boto rds2 options + optional_params_rds2 = { + 'tags': 'tags', + 'publicly_accessible': 'publicly_accessible', + 'parameter_group': 'db_parameter_group_name', + 'character_set_name': 'character_set_name', + 'instance_type': 'db_instance_class', + 'password': 'master_user_password', + } + if has_rds2: + optional_params.update(optional_params_rds2) + sec_group = 'db_security_groups' + else: + optional_params.update(optional_params_rds) + sec_group = 'security_groups' + # Check for options only supported with rds2 + for k in set(optional_params_rds2.keys()) - set(optional_params_rds.keys()): + if module.params.get(k): + module.fail_json(msg="Parameter %s requires boto.rds (boto >= 2.26.0)" % k) + + params = {} + for (k, v) in optional_params.items(): + if module.params.get(k) and k not in required_vars: + if k in valid_vars: + params[v] = module.params[k] + else: + module.fail_json(msg="Parameter %s is not valid for %s command" % (k, command)) + + if 
module.params.get('security_groups'): + params[sec_group] = module.params.get('security_groups').split(',') + + if module.params.get('vpc_security_groups'): + groups_list = [] + for x in module.params.get('vpc_security_groups'): + groups_list.append(boto.rds.VPCSecurityGroupMembership(vpc_group=x)) + params["vpc_security_groups"] = groups_list + return params + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + command = dict(choices=['create', 'replicate', 'delete', 'facts', 'modify', 'promote', 'snapshot', 'restore'], required=True), + instance_name = dict(required=False), + source_instance = dict(required=False), + db_engine = dict(choices=['MySQL', 'oracle-se1', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres'], required=False), + size = dict(required=False), + instance_type = dict(aliases=['type'], required=False), + username = dict(required=False), + password = dict(no_log=True, required=False), + db_name = dict(required=False), + engine_version = dict(required=False), + parameter_group = dict(required=False), + license_model = dict(choices=['license-included', 'bring-your-own-license', 'general-public-license'], required=False), + multi_zone = dict(type='bool', default=False), + iops = dict(required=False), + security_groups = dict(required=False), + vpc_security_groups = dict(type='list', required=False), + port = dict(required=False), + upgrade = dict(type='bool', default=False), + option_group = dict(required=False), + maint_window = dict(required=False), + backup_window = dict(required=False), + backup_retention = dict(required=False), + zone = dict(aliases=['aws_zone', 'ec2_zone'], required=False), + subnet = dict(required=False), + wait = dict(type='bool', default=False), + wait_timeout = dict(default=300), + snapshot = dict(required=False), + apply_immediately = dict(type='bool', default=False), + new_instance_name = dict(required=False), + tags = dict(type='list', 
required=False), + publicly_accessible = dict(required=False), + character_set_name = dict(required=False), + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + ) + invocations = { + 'create': create_db_instance, + 'replicate': replicate_db_instance, + 'delete': delete_db_instance_or_snapshot, + 'facts': facts_db_instance_or_snapshot, + 'modify': modify_db_instance, + 'promote': promote_db_instance, + 'snapshot': snapshot_db_instance, + 'restore': restore_db_instance, + } + + region, ec2_url, aws_connect_params = get_aws_connection_info(module) + if not region: + module.fail_json(msg="region not specified and unable to determine region from EC2_REGION.") + + # connect to the rds endpoint + if has_rds2: + conn = RDS2Connection(module, region, **aws_connect_params) + else: + conn = RDSConnection(module, region, **aws_connect_params) + + invocations[module.params.get('command')](module, conn) + # import module snippets from ansible.module_utils.basic import * from ansible.module_utils.ec2 import * From a1a6201b82db573d0ed8b0ded8fbc99062a2d1ff Mon Sep 17 00:00:00 2001 From: Will Thames Date: Tue, 2 Sep 2014 22:08:44 +1000 Subject: [PATCH 031/236] Improved error handling More tests, particular for failure testing --- cloud/amazon/rds.py | 165 ++++++++++++++++++++++++++++++-------------- 1 file changed, 113 insertions(+), 52 deletions(-) diff --git a/cloud/amazon/rds.py b/cloud/amazon/rds.py index 5e20e3470dc..adfd40247c6 100644 --- a/cloud/amazon/rds.py +++ b/cloud/amazon/rds.py @@ -302,6 +302,19 @@ except ImportError: has_rds2 = False +class RDSException(Exception): + def __init__(self, exc): + if hasattr(exc, 'error_message') and exc.error_message: + self.message = exc.error_message + self.code = exc.error_code + elif hasattr(exc, 'body') and 'Error' in exc.body: + self.message = exc.body['Error']['Message'] + self.code = exc.body['Error']['Code'] + else: + self.message = str(exc) + self.code = 'Unknown Error' + + class RDSConnection: def 
__init__(self, module, region, **aws_connect_params): try: @@ -312,49 +325,73 @@ class RDSConnection: def get_db_instance(self, instancename): try: return RDSDBInstance(self.connection.get_all_dbinstances(instancename)[0]) - except boto.exception.BotoServerError,e: + except boto.exception.BotoServerError, e: return None def get_db_snapshot(self, snapshotid): try: return RDSSnapshot(self.connection.get_all_dbsnapshots(snapshot_id=snapshotid)[0]) - except boto.exception.BotoServerError,e: + except boto.exception.BotoServerError, e: return None def create_db_instance(self, instance_name, size, instance_class, db_engine, username, password, **params): params['engine'] = db_engine - result = self.connection.create_dbinstance(instance_name, size, instance_class, - username, password, **params) - return RDSDBInstance(result) + try: + result = self.connection.create_dbinstance(instance_name, size, instance_class, + username, password, **params) + return RDSDBInstance(result) + except boto.exception.BotoServerError, e: + raise RDSException(e) def create_db_instance_read_replica(self, instance_name, source_instance, **params): - result = self.connection.createdb_instance_read_replica(instance_name, source_instance, **params) - return RDSDBInstance(result) + try: + result = self.connection.createdb_instance_read_replica(instance_name, source_instance, **params) + return RDSDBInstance(result) + except boto.exception.BotoServerError, e: + raise RDSException(e) def delete_db_instance(self, instance_name, **params): - result = self.connection.delete_dbinstance(instance_name, **params) - return RDSDBInstance(result) + try: + result = self.connection.delete_dbinstance(instance_name, **params) + return RDSDBInstance(result) + except boto.exception.BotoServerError, e: + raise RDSException(e) def delete_db_snapshot(self, snapshot): - result = self.connection.delete_dbsnapshot(snapshot) - return RDSSnapshot(result) + try: + result = self.connection.delete_dbsnapshot(snapshot) + return 
RDSSnapshot(result) + except boto.exception.BotoServerError, e: + raise RDSException(e) def modify_db_instance(self, instance_name, **params): - result = self.connection.modify_dbinstance(instance_name, **params) - return RDSDBInstance(result) + try: + result = self.connection.modify_dbinstance(instance_name, **params) + return RDSDBInstance(result) + except boto.exception.BotoServerError, e: + raise RDSException(e) def restore_db_instance_from_db_snapshot(self, instance_name, snapshot, instance_type, **params): - result = self.connection.restore_dbinstance_from_dbsnapshot(snapshot, instance_name, instance_type, **params) - return RDSDBInstance(result) + try: + result = self.connection.restore_dbinstance_from_dbsnapshot(snapshot, instance_name, instance_type, **params) + return RDSDBInstance(result) + except boto.exception.BotoServerError, e: + raise RDSException(e) def create_db_snapshot(self, snapshot, instance_name, **params): - result = self.connection.create_dbsnapshot(snapshot, instance_name) - return RDSSnapshot(result) + try: + result = self.connection.create_dbsnapshot(snapshot, instance_name) + return RDSSnapshot(result) + except boto.exception.BotoServerError, e: + raise RDSException(e) def promote_read_replica(self, instance_name, **params): - result = self.connection.promote_read_replica(instance_name, **params) - return RDSInstance(result) + try: + result = self.connection.promote_read_replica(instance_name, **params) + return RDSInstance(result) + except boto.exception.BotoServerError, e: + raise RDSException(e) class RDS2Connection: @@ -384,37 +421,61 @@ class RDS2Connection: def create_db_instance(self, instance_name, size, instance_class, db_engine, username, password, **params): - result = self.connection.create_db_instance(instance_name, size, instance_class, + try: + result = self.connection.create_db_instance(instance_name, size, instance_class, db_engine, username, password, 
**params)['CreateDBInstanceResponse']['CreateDBInstanceResult']['DBInstance'] - return RDS2DBInstance(result) + return RDS2DBInstance(result) + except boto.exception.BotoServerError, e: + raise RDSException(e) def create_db_instance_read_replica(self, instance_name, source_instance, **params): - result = self.connection.create_db_instance_read_replica(instance_name, source_instance, **params)['CreateDBInstanceReadReplicaResponse']['CreateDBInstanceReadReplicaResult']['DBInstance'] - return RDS2DBInstance(result) + try: + result = self.connection.create_db_instance_read_replica(instance_name, source_instance, **params)['CreateDBInstanceReadReplicaResponse']['CreateDBInstanceReadReplicaResult']['DBInstance'] + return RDS2DBInstance(result) + except boto.exception.BotoServerError, e: + raise RDSException(e) def delete_db_instance(self, instance_name, **params): - result = self.connection.delete_db_instance(instance_name, **params)['DeleteDBInstanceResponse']['DeleteDBInstanceResult']['DBInstance'] - return RDS2DBInstance(result) + try: + result = self.connection.delete_db_instance(instance_name, **params)['DeleteDBInstanceResponse']['DeleteDBInstanceResult']['DBInstance'] + return RDS2DBInstance(result) + except boto.exception.BotoServerError, e: + raise RDSException(e) def delete_db_snapshot(self, snapshot): - result = self.connection.delete_db_snapshot(snapshot)['DeleteDBSnapshotResponse']['DeleteDBSnapshotResult']['DBSnapshot'] - return RDS2Snapshot(result) + try: + result = self.connection.delete_db_snapshot(snapshot)['DeleteDBSnapshotResponse']['DeleteDBSnapshotResult']['DBSnapshot'] + return RDS2Snapshot(result) + except boto.exception.BotoServerError, e: + raise RDSException(e) def modify_db_instance(self, instance_name, **params): - result = self.connection.modify_db_instance(instance_name, **params)['ModifyDBInstanceResponse']['ModifyDBInstanceResult']['DBInstance'] - return RDS2DBInstance(result) + try: + result = 
self.connection.modify_db_instance(instance_name, **params)['ModifyDBInstanceResponse']['ModifyDBInstanceResult']['DBInstance'] + return RDS2DBInstance(result) + except boto.exception.BotoServerError, e: + raise RDSException(e) def restore_db_instance_from_db_snapshot(self, instance_name, snapshot, instance_type, **params): - result = self.connection.restore_db_instance_from_db_snapshot(instance_name, snapshot, **params)['RestoreDBInstanceFromDBSnapshotResponse']['RestoreDBInstanceFromDBSnapshotResult']['DBInstance'] - return RDS2DBInstance(result) + try: + result = self.connection.restore_db_instance_from_db_snapshot(instance_name, snapshot, **params)['RestoreDBInstanceFromDBSnapshotResponse']['RestoreDBInstanceFromDBSnapshotResult']['DBInstance'] + return RDS2DBInstance(result) + except boto.exception.BotoServerError, e: + raise RDSException(e) def create_db_snapshot(self, snapshot, instance_name, **params): - result = self.connection.create_db_snapshot(snapshot, instance_name, **params)['CreateDBSnapshotResponse']['CreateDBSnapshotResult']['DBSnapshot'] - return RDS2Snapshot(result) + try: + result = self.connection.create_db_snapshot(snapshot, instance_name, **params)['CreateDBSnapshotResponse']['CreateDBSnapshotResult']['DBSnapshot'] + return RDS2Snapshot(result) + except boto.exception.BotoServerError, e: + raise RDSException(e) def promote_read_replica(self, instance_name, **params): - result = self.connection.promote_read_replica(instance_name, **params)['PromoteReadReplicaResponse']['PromoteReadReplicaResult']['DBInstance'] - return RDS2DBInstance(result) + try: + result = self.connection.promote_read_replica(instance_name, **params)['PromoteReadReplicaResponse']['PromoteReadReplicaResult']['DBInstance'] + return RDS2DBInstance(result) + except boto.exception.BotoServerError, e: + raise RDSException(e) class RDSDBInstance: @@ -587,8 +648,8 @@ def create_db_instance(module, conn): module.params.get('instance_type'), module.params.get('db_engine'), 
module.params.get('username'), module.params.get('password'), **params) changed = True - except boto.exception.StandardError, e: - module.fail_json(msg=e.error_message) + except RDSException, e: + module.fail_json(msg=e.message) if module.params.get('wait'): resource = await_resource(conn, result, 'available', module) @@ -614,8 +675,8 @@ def replicate_db_instance(module, conn): try: result = conn.create_db_instance_read_replica(instance_name, source_instance, **params) changed = True - except boto.exception.StandardError, e: - module.fail_json(msg=e.error_message) + except RDSException, e: + module.fail_json(msg=e.message) if module.params.get('wait'): resource = await_resource(conn, result, 'available', module) @@ -650,8 +711,8 @@ def delete_db_instance_or_snapshot(module, conn): result = conn.delete_db_instance(instance_name, **params) else: result = conn.delete_db_snapshot(snapshot) - except boto.exception.StandardError, e: - module.fail_json(msg=e.error_message) + except RDSException, e: + module.fail_json(msg=e.message) # If we're not waiting for a delete to complete then we're all done # so just return @@ -660,11 +721,11 @@ def delete_db_instance_or_snapshot(module, conn): try: resource = await_resource(conn, result, 'deleted', module) module.exit_json(changed=True) - except boto.exception.StandardError, e: - if e.error_code == 'DBInstanceNotFound': + except RDSException, e: + if e.code == 'DBInstanceNotFound': module.exit_json(changed=True) else: - module.fail_json(msg=e.error_message) + module.fail_json(msg=e.message) except Exception, e: module.fail_json(msg=str(e)) @@ -704,8 +765,8 @@ def modify_db_instance(module, conn): try: result = conn.modify_db_instance(instance_name, **params) - except boto.exception.StandardError, e: - module.fail_json(msg=e.error_message) + except RDSException, e: + module.fail_json(msg=e.message) if params.get('apply_immediately'): if new_instance_name: # Wait until the new instance name is valid @@ -753,8 +814,8 @@ def 
promote_db_instance(module, conn): else: try: result = conn.promote_read_replica(instance_name, **params) - except boto.exception.StandardError, e: - module.fail_json(msg=e.error_message) + except RDSException, e: + module.fail_json(msg=e.message) if module.params.get('wait'): resource = await_resource(conn, result, 'available', module) @@ -776,8 +837,8 @@ def snapshot_db_instance(module, conn): try: result = conn.create_db_snapshot(snapshot, instance_name, **params) changed = True - except boto.exception.StandardError, e: - module.fail_json(msg=e.error_message) + except RDSException, e: + module.fail_json(msg=e.message) if module.params.get('wait'): resource = await_resource(conn, result, 'available', module) @@ -807,8 +868,8 @@ def restore_db_instance(module, conn): try: result = conn.restore_db_instance_from_db_snapshot(instance_name, snapshot, instance_type, **params) changed = True - except boto.exception.StandardError, e: - module.fail_json(msg=e.error_message) + except RDSException, e: + module.fail_json(msg=e.message) if module.params.get('wait'): resource = await_resource(conn, result, 'available', module) From 0edb9f51e85c10ccbbf0779689e7a4805c67c519 Mon Sep 17 00:00:00 2001 From: Denver Janke Date: Thu, 4 Sep 2014 16:13:09 +1000 Subject: [PATCH 032/236] Fix few bugs around renaming db instances. 
--- cloud/amazon/rds.py | 43 +++++++++++++++---------------------------- 1 file changed, 15 insertions(+), 28 deletions(-) diff --git a/cloud/amazon/rds.py b/cloud/amazon/rds.py index adfd40247c6..0095aed7ba8 100644 --- a/cloud/amazon/rds.py +++ b/cloud/amazon/rds.py @@ -753,11 +753,10 @@ def facts_db_instance_or_snapshot(module, conn): def modify_db_instance(module, conn): required_vars = ['instance_name'] - valid_vars = ['backup_retention', 'backup_window', 'db_name', 'engine_version', - 'instance_type', 'iops', 'license_model', 'maint_window', - 'password', 'multi_zone', 'new_instance_name', - 'option_group', 'parameter_group', - 'size', 'upgrade'] + valid_vars = ['apply_immediately', 'backup_retention', 'backup_window', + 'db_name', 'engine_version', 'instance_type', 'iops', 'license_model', + 'maint_window', 'multi_zone', 'new_instance_name', + 'option_group', 'parameter_group' 'password', 'size', 'upgrade'] params = validate_parameters(required_vars, valid_vars, module) instance_name = module.params.get('instance_name') @@ -770,28 +769,15 @@ def modify_db_instance(module, conn): if params.get('apply_immediately'): if new_instance_name: # Wait until the new instance name is valid - found = 0 - while found == 0: - if has_rds2: - instances = conn.describe_all_db_instances() - else: - instances = conn.get_all_dbinstances() - for i in instances: - if i.id == new_instance_name: - instance_name = new_instance_name - found = 1 - if found == 0: - time.sleep(5) - - # The name of the database has now changed, so we have - # to force result to contain the new instance, otherwise - # the call below to get_current_resource will fail since it - # will be looking for the old instance name. 
- result.id = new_instance_name - else: - # Wait for a few seconds since it takes a while for AWS - # to change the instance from 'available' to 'modifying' - time.sleep(5) + new_instance = None + while not new_instance: + new_instance = conn.get_db_instance(new_instance_name) + time.sleep(5) + + # Found instance but it briefly flicks to available + # before rebooting so let's wait until we see it rebooting + # before we check whether to 'wait' + result = await_resource(conn, new_instance, 'rebooting', module) if module.params.get('wait'): resource = await_resource(conn, result, 'available', module) @@ -918,6 +904,7 @@ def validate_parameters(required_vars, valid_vars, module): 'character_set_name': 'character_set_name', 'instance_type': 'db_instance_class', 'password': 'master_user_password', + 'new_instance_name': 'new_db_instance_identifier', } if has_rds2: optional_params.update(optional_params_rds2) @@ -977,7 +964,7 @@ def main(): zone = dict(aliases=['aws_zone', 'ec2_zone'], required=False), subnet = dict(required=False), wait = dict(type='bool', default=False), - wait_timeout = dict(default=300), + wait_timeout = dict(type='int', default=300), snapshot = dict(required=False), apply_immediately = dict(type='bool', default=False), new_instance_name = dict(required=False), From 565fa9a0ae8e3867852a65343f198ff3c0b6b404 Mon Sep 17 00:00:00 2001 From: Will Thames Date: Mon, 1 Sep 2014 10:11:52 +1000 Subject: [PATCH 033/236] Fixed tagging --- cloud/amazon/rds.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/cloud/amazon/rds.py b/cloud/amazon/rds.py index 0095aed7ba8..96e4848e056 100644 --- a/cloud/amazon/rds.py +++ b/cloud/amazon/rds.py @@ -236,7 +236,7 @@ options: version_added: 1.8 tags: description: - - tags to apply to a resource. Used with command=create, command=replicate, command=restore. Requires boto >= 2.26.0 + - tags dict to apply to a resource. Used with command=create, command=replicate, command=restore. 
Requires boto >= 2.26.0 required: false default: null aliases: [] @@ -257,6 +257,9 @@ EXAMPLES = ''' instance_type: db.m1.small username: mysql_admin password: 1nsecure + tags: + Environment: testing + Application: cms # Create a read-only replica and wait for it to become available - rds: @@ -933,6 +936,10 @@ def validate_parameters(required_vars, valid_vars, module): for x in module.params.get('vpc_security_groups'): groups_list.append(boto.rds.VPCSecurityGroupMembership(vpc_group=x)) params["vpc_security_groups"] = groups_list + + # Convert tags dict to list of tuples that rds2 expects + if 'tags' in params: + params['tags'] = module.params['tags'].items() return params @@ -968,7 +975,7 @@ def main(): snapshot = dict(required=False), apply_immediately = dict(type='bool', default=False), new_instance_name = dict(required=False), - tags = dict(type='list', required=False), + tags = dict(type='dict', required=False), publicly_accessible = dict(required=False), character_set_name = dict(required=False), ) From a75bef5bc6ae3ca23e312f9aec9c56b31811f4d0 Mon Sep 17 00:00:00 2001 From: Jonathan Mainguy Date: Mon, 29 Sep 2014 20:09:32 -0400 Subject: [PATCH 034/236] Fixes bind mounts Fixed tab's and spaces, or so I think I did Fixed tabs and spaces for real this time --- system/mount.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/system/mount.py b/system/mount.py index 9dc6fbe7b8c..d1104b361e1 100644 --- a/system/mount.py +++ b/system/mount.py @@ -320,6 +320,17 @@ def main(): if os.path.ismount(name): if changed: res,msg = mount(module, **args) + elif "bind" in args['opts']: + changed = True + cmd = 'mount -l' + rc, out, err = module.run_command(cmd) + allmounts = out.split('\n') + for mounts in allmounts[:-1]: + arguments = mounts.split() + if arguments[0] == args['src'] and arguments[2] == args['name'] and arguments[4] == args['fstype']: + changed = False + if changed: + res,msg = mount(module, **args) else: changed = True res,msg = mount(module, 
**args) From a5b1a599e20ea9230956783036d6f5aaf5b154ab Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Piotr=20Gos=C5=82awski?= Date: Thu, 4 Dec 2014 17:07:03 +0100 Subject: [PATCH 035/236] update docs for insertbefore/insertafter --- files/lineinfile.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/files/lineinfile.py b/files/lineinfile.py index e1a7980f38a..e02274b0027 100644 --- a/files/lineinfile.py +++ b/files/lineinfile.py @@ -85,8 +85,9 @@ options: default: EOF description: - Used with C(state=present). If specified, the line will be inserted - after the specified regular expression. A special value is - available; C(EOF) for inserting the line at the end of the file. + after the last match of specified regular expression. A special value is + available; C(EOF) for inserting the line at the end of the file. + If specified regular expresion has no matches, EOF will be used instead. May not be used with C(backrefs). choices: [ 'EOF', '*regex*' ] insertbefore: @@ -94,9 +95,10 @@ options: version_added: "1.1" description: - Used with C(state=present). If specified, the line will be inserted - before the specified regular expression. A value is available; - C(BOF) for inserting the line at the beginning of the file. - May not be used with C(backrefs). + before the last match of specified regular expression. A value is + available; C(BOF) for inserting the line at the beginning of the file. + If specified regular expresion has no matches, C(insertbefore) will be + ignored. May not be used with C(backrefs). 
choices: [ 'BOF', '*regex*' ] create: required: false From 2d5d7ff542591a55eba0270346087567c8bbeb7b Mon Sep 17 00:00:00 2001 From: Jesse Buchanan Date: Sun, 7 Dec 2014 13:35:24 -0500 Subject: [PATCH 036/236] Files module: Allow touch on hardlinks --- files/file.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/files/file.py b/files/file.py index 7aa5e45d7bc..d8c5b2762d8 100644 --- a/files/file.py +++ b/files/file.py @@ -332,13 +332,13 @@ def main(): open(path, 'w').close() except OSError, e: module.fail_json(path=path, msg='Error, could not touch target: %s' % str(e)) - elif prev_state in ['file', 'directory']: + elif prev_state in ['file', 'directory', 'hard']: try: os.utime(path, None) except OSError, e: module.fail_json(path=path, msg='Error while touching existing target: %s' % str(e)) else: - module.fail_json(msg='Cannot touch other than files and directories') + module.fail_json(msg='Cannot touch other than files, directories, and hardlinks (%s is %s)' % (path, prev_state)) try: module.set_fs_attributes_if_different(file_args, True) except SystemExit, e: From 05ca85a172d6d186fefe5e5dcb36c6157c34730c Mon Sep 17 00:00:00 2001 From: Peter Gehres Date: Wed, 10 Dec 2014 10:50:16 -0800 Subject: [PATCH 037/236] Issue #489: s3 should not fail on checksum mismtach when overwrite=no --- cloud/amazon/s3.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/s3.py b/cloud/amazon/s3.py index 7b914dd9117..c91cc262367 100644 --- a/cloud/amazon/s3.py +++ b/cloud/amazon/s3.py @@ -376,7 +376,7 @@ def main(): if overwrite is True: download_s3file(module, s3, bucket, obj, dest) else: - module.fail_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force download.", failed=True) + module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force download.", failed=False) # Firstly, if key_matches is TRUE and overwrite is not enabled, we EXIT with a helpful message. 
if sum_matches is True and overwrite is False: From 2acfbf016d8626df445839caf522debc393f0d31 Mon Sep 17 00:00:00 2001 From: Patrik Lundin Date: Thu, 11 Dec 2014 23:01:23 +0100 Subject: [PATCH 038/236] Handle string returned by 'default' correctly. We need to handle the string returned by 'default' in the same way we handle the string returned by 'status' since the resulting flags are compared later. --- system/service.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/system/service.py b/system/service.py index 2d62edc67ac..c9ce55d1a37 100644 --- a/system/service.py +++ b/system/service.py @@ -996,7 +996,14 @@ class OpenBsdService(Service): if stderr: self.module.fail_json(msg=stderr) - default_flags = stdout.rstrip() + default_string = stdout.rstrip() + + # Depending on the service the string returned from 'default' may be + # either a set of flags or the boolean YES/NO + if default_string == "YES" or default_string == "NO": + default_flags = '' + else: + default_flags = default_string rc, stdout, stderr = self.execute_command("%s %s %s" % (self.enable_cmd, 'status', self.name)) From ccfdff4f0091c59d58421fecb14d8e86375eb406 Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Sat, 13 Dec 2014 22:05:40 +0100 Subject: [PATCH 039/236] Make force parameter work for export operation The default is changed from 'yes' to 'no' to follow subversion behavior (ie, requiring explicit confirmation to erase a existing repository). Since that was not working before cf #370 and since the option was ignored before and unused, this should be safe to change. 
--- source_control/subversion.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/source_control/subversion.py b/source_control/subversion.py index 6709a8c3939..e3a013cc340 100644 --- a/source_control/subversion.py +++ b/source_control/subversion.py @@ -123,7 +123,12 @@ class Subversion(object): def export(self, force=False): '''Export svn repo to directory''' - self._exec(["export", "-r", self.revision, self.repo, self.dest]) + cmd = ["export"] + if force: + cmd.append("--force") + cmd.extend(["-r", self.revision, self.repo, self.dest]) + + self._exec(cmd) def switch(self): '''Change working directory's repo.''' @@ -173,7 +178,7 @@ def main(): dest=dict(required=True), repo=dict(required=True, aliases=['name', 'repository']), revision=dict(default='HEAD', aliases=['rev', 'version']), - force=dict(default='yes', type='bool'), + force=dict(default='no', type='bool'), username=dict(required=False), password=dict(required=False), executable=dict(default=None), @@ -202,7 +207,7 @@ def main(): if not export: svn.checkout() else: - svn.export() + svn.export(force=force) elif os.path.exists("%s/.svn" % (dest, )): # Order matters. Need to get local mods before switch to avoid false # positives. 
Need to switch before revert to ensure we are reverting to From fa46f3c269194bfd05b5f4b330d3e8c00d17b38d Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Sat, 13 Dec 2014 22:07:41 +0100 Subject: [PATCH 040/236] Fix #370, by allowing to export over a existing repository This requires to use force=True --- source_control/subversion.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/source_control/subversion.py b/source_control/subversion.py index e3a013cc340..8f6d81e5694 100644 --- a/source_control/subversion.py +++ b/source_control/subversion.py @@ -199,7 +199,7 @@ def main(): os.environ['LANG'] = 'C' svn = Subversion(module, dest, repo, revision, username, password, svn_path) - if not os.path.exists(dest): + if export or not os.path.exists(dest): before = None local_mods = False if module.check_mode: @@ -227,9 +227,12 @@ def main(): else: module.fail_json(msg="ERROR: %s folder already exists, but its not a subversion repository." % (dest, )) - after = svn.get_revision() - changed = before != after or local_mods - module.exit_json(changed=changed, before=before, after=after) + if export: + module.exit_json(changed=True) + else: + after = svn.get_revision() + changed = before != after or local_mods + module.exit_json(changed=changed, before=before, after=after) # import module snippets from ansible.module_utils.basic import * From 0ab3b30b2faccab5245aeea172644a15c2274c4d Mon Sep 17 00:00:00 2001 From: Richard Lander Date: Mon, 15 Dec 2014 16:26:33 -0500 Subject: [PATCH 041/236] set network_config.public_ips attribute --- cloud/azure/azure.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cloud/azure/azure.py b/cloud/azure/azure.py index 1679fbc45d1..9c73df9069f 100644 --- a/cloud/azure/azure.py +++ b/cloud/azure/azure.py @@ -281,6 +281,7 @@ def create_virtual_machine(module, azure): network_config = ConfigurationSetInputEndpoints() network_config.configuration_set_type = 'NetworkConfiguration' network_config.subnet_names = [] + 
network_config.public_ips = None for port in endpoints: network_config.input_endpoints.append(ConfigurationSetInputEndpoint(name='TCP-%s' % port, protocol='TCP', From 73e32db22fbdecd049bd740a45c63ec6e51b09fe Mon Sep 17 00:00:00 2001 From: Richard Lander Date: Tue, 16 Dec 2014 17:28:12 -0500 Subject: [PATCH 042/236] password required --- cloud/azure/azure.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cloud/azure/azure.py b/cloud/azure/azure.py index 1679fbc45d1..be9f0af56b1 100644 --- a/cloud/azure/azure.py +++ b/cloud/azure/azure.py @@ -442,6 +442,8 @@ def main(): module.fail_json(msg='location parameter is required for new instance') if not module.params.get('storage_account'): module.fail_json(msg='storage_account parameter is required for new instance') + if not module.params.get('password'): + module.fail_json(msg='password parameter is required for new instance') (changed, public_dns_name, deployment) = create_virtual_machine(module, azure) module.exit_json(changed=changed, public_dns_name=public_dns_name, deployment=json.loads(json.dumps(deployment, default=lambda o: o.__dict__))) From a942e5f85319c6516dbb9f2989cd55b4865b4518 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 17 Dec 2014 12:48:01 -0500 Subject: [PATCH 043/236] Revert "Merge pull request #384 from jhawkesworth/win_copy_file_template_ansible_modules_core_1" I missed some discussion in devel, these need more work before inclusion This reverts commit 58bfebb0477adda2a676381850038e0abf8c8f00, reversing changes made to 27dee77ca0449bdb338b2db89e044d1d9b553b4a. 
--- windows/win_copy.ps1 | 84 -------------------------------- windows/win_copy.py | 60 ----------------------- windows/win_file.ps1 | 105 ---------------------------------------- windows/win_file.py | 73 ---------------------------- windows/win_stat.ps1 | 6 ++- windows/win_template.py | 52 -------------------- 6 files changed, 4 insertions(+), 376 deletions(-) delete mode 100644 windows/win_copy.ps1 delete mode 100644 windows/win_copy.py delete mode 100644 windows/win_file.ps1 delete mode 100644 windows/win_file.py delete mode 100644 windows/win_template.py diff --git a/windows/win_copy.ps1 b/windows/win_copy.ps1 deleted file mode 100644 index 9ffdab85f03..00000000000 --- a/windows/win_copy.ps1 +++ /dev/null @@ -1,84 +0,0 @@ -#!powershell -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# WANT_JSON -# POWERSHELL_COMMON - -$params = Parse-Args $args; - -$src= Get-Attr $params "src" $FALSE; -If ($src -eq $FALSE) -{ - Fail-Json (New-Object psobject) "missing required argument: src"; -} - -$dest= Get-Attr $params "dest" $FALSE; -If ($dest -eq $FALSE) -{ - Fail-Json (New-Object psobject) "missing required argument: dest"; -} - -# seems to be supplied by the calling environment, but -# probably shouldn't be a test for it existing in the params. -# TODO investigate. 
-$original_basename = Get-Attr $params "original_basename" $FALSE; -If ($original_basename -eq $FALSE) -{ - Fail-Json (New-Object psobject) "missing required argument: original_basename "; -} - -$result = New-Object psobject @{ - changed = $FALSE -}; - -# if $dest is a dir, append $original_basename so the file gets copied with its intended name. -if (Test-Path $dest -PathType Container) -{ - $dest = Join-Path $dest $original_basename; -} - -If (Test-Path $dest) -{ - $dest_checksum = Get-FileChecksum ($dest); - $src_checksum = Get-FileChecksum ($src); - - If (! $src_checksum.CompareTo($dest_checksum)) - { - # New-Item -Force creates subdirs for recursive copies - New-Item -Force $dest -Type file; - Copy-Item -Path $src -Destination $dest -Force; - } - $dest_checksum = Get-FileChecksum ($dest); - If ( $src_checksum.CompareTo($dest_checksum)) - { - $result.changed = $TRUE; - } - Else - { - Fail-Json (New-Object psobject) "Failed to place file"; - } -} -Else -{ - New-Item -Force $dest -Type file; - Copy-Item -Path $src -Destination $dest; - $result.changed = $TRUE; -} - -$dest_checksum = Get-FileChecksum($dest); -$result.checksum = $dest_checksum; - -Exit-Json $result; diff --git a/windows/win_copy.py b/windows/win_copy.py deleted file mode 100644 index 7d0b49e5985..00000000000 --- a/windows/win_copy.py +++ /dev/null @@ -1,60 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -import os -import time - -DOCUMENTATION = ''' ---- -module: win_copy -version_added: "1.8" -short_description: Copies files to remote locations on windows hosts. -description: - - The M(win_copy) module copies a file on the local box to remote windows locations. -options: - src: - description: - - Local path to a file to copy to the remote server; can be absolute or relative. - If path is a directory, it is copied recursively. In this case, if path ends - with "/", only inside contents of that directory are copied to destination. - Otherwise, if it does not end with "/", the directory itself with all contents - is copied. This behavior is similar to Rsync. - required: false - default: null - aliases: [] - dest: - description: - - Remote absolute path where the file should be copied to. If src is a directory, - this must be a directory too. Use \\ for path separators. - required: true - default: null -author: Michael DeHaan -notes: - - The "win_copy" module recursively copy facility does not scale to lots (>hundreds) of files. - Instead, you may find it better to create files locally, perhaps using win_template, and - then use win_get_url to put them in the correct location. -''' - -EXAMPLES = ''' -# Example from Ansible Playbooks -- win_copy: src=/srv/myfiles/foo.conf dest=c:\\TEMP\\foo.conf - -''' - diff --git a/windows/win_file.ps1 b/windows/win_file.ps1 deleted file mode 100644 index 62ac81fc1ee..00000000000 --- a/windows/win_file.ps1 +++ /dev/null @@ -1,105 +0,0 @@ -#!powershell -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# WANT_JSON -# POWERSHELL_COMMON - -$params = Parse-Args $args; - -# path -$path = Get-Attr $params "path" $FALSE; -If ($path -eq $FALSE) -{ - $path = Get-Attr $params "dest" $FALSE; - If ($path -eq $FALSE) - { - $path = Get-Attr $params "name" $FALSE; - If ($path -eq $FALSE) - { - Fail-Json (New-Object psobject) "missing required argument: path"; - } - } -} - -# JH Following advice from Chris Church, only allow the following states -# in the windows version for now: -# state - file, directory, touch, absent -# (originally was: state - file, link, directory, hard, touch, absent) - -$state = Get-Attr $params "state" "file"; - -#$recurse = Get-Attr $params "recurse" "no"; - -# force - yes, no -# $force = Get-Attr $params "force" "no"; - -# result -$result = New-Object psobject @{ - changed = $FALSE -}; - -If ( $state -eq "touch" ) -{ - If(Test-Path $path) - { - (Get-ChildItem $path).LastWriteTime = Get-Date - } - Else - { - echo $null > $file - } - $result.changed = $TRUE; -} - -If (Test-Path $path) -{ - $fileinfo = Get-Item $path; - If ( $state -eq "absent" ) - { - Remove-Item -Recurse -Force $fileinfo; - $result.changed = $TRUE; - } - Else - { - # Only files have the .Directory attribute. - If ( $state -eq "directory" -and $fileinfo.Directory ) - { - Fail-Json (New-Object psobject) "path is not a directory"; - } - - # Only files have the .Directory attribute. 
- If ( $state -eq "file" -and -not $fileinfo.Directory ) - { - Fail-Json (New-Object psobject) "path is not a file"; - } - - } -} -Else -{ - If ( $state -eq "directory" ) - { - New-Item -ItemType directory -Path $path - $result.changed = $TRUE; - } - - If ( $state -eq "file" ) - { - Fail-Json (New-Object psobject) "path will not be created"; - } -} - -Exit-Json $result; diff --git a/windows/win_file.py b/windows/win_file.py deleted file mode 100644 index 6a218216617..00000000000 --- a/windows/win_file.py +++ /dev/null @@ -1,73 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - - -DOCUMENTATION = ''' ---- -module: win_file -version_added: "1.8" -short_description: Creates, touches or removes files or directories. -extends_documentation_fragment: files -description: - - Creates (empty) files, updates file modification stamps of existing files, - and can create or remove directories. - Unlike M(file), does not modify ownership, permissions or manipulate links. -notes: - - See also M(win_copy), M(win_template), M(copy), M(template), M(assemble) -requirements: [ ] -author: Michael DeHaan -options: - path: - description: - - 'path to the file being managed. 
Aliases: I(dest), I(name)' - required: true - default: [] - aliases: ['dest', 'name'] - state: - description: - - If C(directory), all immediate subdirectories will be created if they - do not exist. - If C(file), the file will NOT be created if it does not exist, see the M(copy) - or M(template) module if you want that behavior. If C(absent), - directories will be recursively deleted, and files will be removed. - If C(touch), an empty file will be created if the c(path) does not - exist, while an existing file or directory will receive updated file access and - modification times (similar to the way `touch` works from the command line). - required: false - default: file - choices: [ file, directory, touch, absent ] -''' - -EXAMPLES = ''' -# create a file -- win_file: path=C:\\temp\\foo.conf - -# touch a file (creates if not present, updates modification time if present) -- win_file: path=C:\\temp\\foo.conf state=touch - -# remove a file, if present -- win_file: path=C:\\temp\\foo.conf state=absent - -# create directory structure -- win_file: path=C:\\temp\\folder\\subfolder state=directory - -# remove directory structure -- win_file: path=C:\\temp state=absent -''' diff --git a/windows/win_stat.ps1 b/windows/win_stat.ps1 index 10101a62b30..4e4c55b2aa3 100644 --- a/windows/win_stat.ps1 +++ b/windows/win_stat.ps1 @@ -53,9 +53,11 @@ Else If ($get_md5 -and $result.stat.exists -and -not $result.stat.isdir) { - $hash = Get-FileChecksum($path); + $sp = new-object -TypeName System.Security.Cryptography.MD5CryptoServiceProvider; + $fp = [System.IO.File]::Open($path, [System.IO.Filemode]::Open, [System.IO.FileAccess]::Read); + $hash = [System.BitConverter]::ToString($sp.ComputeHash($fp)).Replace("-", "").ToLower(); + $fp.Dispose(); Set-Attr $result.stat "md5" $hash; - Set-Attr $result.stat "checksum" $hash; } Exit-Json $result; diff --git a/windows/win_template.py b/windows/win_template.py deleted file mode 100644 index 402702f93b2..00000000000 --- a/windows/win_template.py 
+++ /dev/null @@ -1,52 +0,0 @@ -# this is a virtual module that is entirely implemented server side - -DOCUMENTATION = ''' ---- -module: win_template -version_added: 1.8 -short_description: Templates a file out to a remote server. -description: - - Templates are processed by the Jinja2 templating language - (U(http://jinja.pocoo.org/docs/)) - documentation on the template - formatting can be found in the Template Designer Documentation - (U(http://jinja.pocoo.org/docs/templates/)). - - "Six additional variables can be used in templates: C(ansible_managed) - (configurable via the C(defaults) section of C(ansible.cfg)) contains a string - which can be used to describe the template name, host, modification time of the - template file and the owner uid, C(template_host) contains the node name of - the template's machine, C(template_uid) the owner, C(template_path) the - absolute path of the template, C(template_fullpath) is the absolute path of the - template, and C(template_run_date) is the date that the template was rendered. Note that including - a string that uses a date in the template will result in the template being marked 'changed' - each time." -options: - src: - description: - - Path of a Jinja2 formatted template on the local server. This can be a relative or absolute path. - required: true - default: null - aliases: [] - dest: - description: - - Location to render the template to on the remote machine. - required: true - default: null - backup: - description: - - Create a backup file including the timestamp information so you can get - the original file back if you somehow clobbered it incorrectly. - required: false - choices: [ "yes", "no" ] - default: "no" -notes: - - "templates are loaded with C(trim_blocks=True)." 
-requirements: [] -author: Michael DeHaan -''' - -EXAMPLES = ''' -# Example -- win_template: src=/mytemplates/foo.j2 dest=C:\\temp\\file.conf - - -''' From d564569910ea8a71b954c73807776a98c3a00153 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 17 Dec 2014 19:55:54 -0500 Subject: [PATCH 044/236] update to use connect_to_region to avoid errors with china --- cloud/amazon/cloudformation.py | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) diff --git a/cloud/amazon/cloudformation.py b/cloud/amazon/cloudformation.py index 1c8a9d6aca5..b382e3f05ff 100644 --- a/cloud/amazon/cloudformation.py +++ b/cloud/amazon/cloudformation.py @@ -130,13 +130,6 @@ except ImportError: sys.exit(1) -class Region: - def __init__(self, region): - '''connects boto to the region specified in the cloudformation template''' - self.name = region - self.endpoint = 'cloudformation.%s.amazonaws.com' % region - - def boto_exception(err): '''generic error message handler''' if hasattr(err, 'error_message'): @@ -239,11 +232,10 @@ def main(): stack_outputs = {} try: - cf_region = Region(region) - cfn = boto.cloudformation.connection.CloudFormationConnection( - aws_access_key_id=aws_access_key, + cfn = boto.cloudformation.connect_to_region( + region, + aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_key, - region=cf_region, ) except boto.exception.NoAuthHandlerFound, e: module.fail_json(msg=str(e)) From c242de1a396614f8b37df2e687bd0f73332aa300 Mon Sep 17 00:00:00 2001 From: Rohan McGovern Date: Thu, 13 Nov 2014 08:14:31 +1000 Subject: [PATCH 045/236] git: clean up "fetch" method De-duplicate repetitive code checking the exit code. Include the stdout/stderr of the failed process in all cases. Remove the returned values because no caller uses them. Combine git commands where possible. There is no need to fetch branches and tags as two separate operations. 
--- source_control/git.py | 33 ++++++++++----------------------- 1 file changed, 10 insertions(+), 23 deletions(-) diff --git a/source_control/git.py b/source_control/git.py index 3b627b2594e..f3cb329faf1 100644 --- a/source_control/git.py +++ b/source_control/git.py @@ -453,33 +453,20 @@ def get_head_branch(git_path, module, dest, remote, bare=False): def fetch(git_path, module, repo, dest, version, remote, bare): ''' updates repo from remote sources ''' - out_acc = [] - err_acc = [] - (rc, out0, err0) = module.run_command([git_path, 'remote', 'set-url', remote, repo], cwd=dest) - if rc != 0: - module.fail_json(msg="Failed to set a new url %s for %s: %s" % (repo, remote, out0 + err0)) - if bare: - (rc, out1, err1) = module.run_command([git_path, 'fetch', remote, '+refs/heads/*:refs/heads/*'], cwd=dest) - else: - (rc, out1, err1) = module.run_command("%s fetch %s" % (git_path, remote), cwd=dest) - out_acc.append(out1) - err_acc.append(err1) - if rc != 0: - module.fail_json(msg="Failed to download remote objects and refs: %s %s" % - (''.join(out_acc), ''.join(err_acc))) + commands = [["set a new url %s for %s" % (repo, remote)], [git_path, 'remote', 'set-url', remote, repo]] + + fetch_str = 'download remote objects and refs' if bare: - (rc, out2, err2) = module.run_command([git_path, 'fetch', remote, '+refs/tags/*:refs/tags/*'], cwd=dest) + refspecs = ['+refs/heads/*:refs/heads/*', '+refs/tags/*:refs/tags/*'] + commands.append([fetch_str, [git_path, 'fetch', remote] + refspecs]) else: - (rc, out2, err2) = module.run_command("%s fetch --tags %s" % (git_path, remote), cwd=dest) - out_acc.append(out2) - err_acc.append(err2) - if rc != 0: - module.fail_json(msg="Failed to download remote objects and refs: %s %s" % - (''.join(out_acc), ''.join(err_acc))) - - return (rc, ''.join(out_acc), ''.join(err_acc)) + commands.append([fetch_str, [git_path, 'fetch', '--tags']]) + for (label,command) in commands: + (rc,out,err) = module.run_command(command, cwd=dest) + if rc != 0: + 
module.fail_json(msg="Failed to %s: %s %s" % (label, out, err)) def submodules_fetch(git_path, module, remote, track_submodules, dest): changed = False From cf8504728490c352172156034d93a81a03ef8c39 Mon Sep 17 00:00:00 2001 From: Rohan McGovern Date: Fri, 21 Nov 2014 12:27:03 +1000 Subject: [PATCH 046/236] git: add 'refspec' argument This argument may be used to fetch additional refs beyond the default refs/heads/* and refs/tags/*. Checking out GitHub pull requests or Gerrit patch sets are two examples where this is useful. Without this, specifying version= with a SHA1 unreachable from any tag or branch can't work. --- source_control/git.py | 34 ++++++++++++++++++++++++++++++---- 1 file changed, 30 insertions(+), 4 deletions(-) diff --git a/source_control/git.py b/source_control/git.py index f3cb329faf1..dbea32d7d60 100644 --- a/source_control/git.py +++ b/source_control/git.py @@ -80,6 +80,17 @@ options: default: "origin" description: - Name of the remote. + refspec: + required: false + default: null + version_added: "1.9" + description: + - Add an additional refspec to be fetched. + If version is set to a I(SHA-1) not reachable from any branch + or tag, this option may be necessary to specify the ref containing + the I(SHA-1). + Uses the same syntax as the 'git fetch' command. + An example value could be "refs/meta/config". 
force: required: false default: "yes" @@ -166,6 +177,9 @@ EXAMPLES = ''' # Example just ensuring the repo checkout exists - git: repo=git://foosball.example.org/path/to/repo.git dest=/srv/checkout clone=no update=no + +# Example checkout a github repo and use refspec to fetch all pull requests +- git: repo=https://github.com/ansible/ansible-examples.git dest=/src/ansible-examples refspec=+refs/pull/*:refs/heads/* ''' import re @@ -279,7 +293,7 @@ def get_submodule_versions(git_path, module, dest, version='HEAD'): return submodules def clone(git_path, module, repo, dest, remote, depth, version, bare, - reference): + reference, refspec): ''' makes a new git repo if it does not already exist ''' dest_dirname = os.path.dirname(dest) try: @@ -304,6 +318,9 @@ def clone(git_path, module, repo, dest, remote, depth, version, bare, if remote != 'origin': module.run_command([git_path, 'remote', 'add', remote, repo], check_rc=True, cwd=dest) + if refspec: + module.run_command([git_path, 'fetch', remote, refspec], check_rc=True, cwd=dest) + def has_local_mods(module, git_path, dest, bare): if bare: return False @@ -451,7 +468,7 @@ def get_head_branch(git_path, module, dest, remote, bare=False): f.close() return branch -def fetch(git_path, module, repo, dest, version, remote, bare): +def fetch(git_path, module, repo, dest, version, remote, bare, refspec): ''' updates repo from remote sources ''' commands = [["set a new url %s for %s" % (repo, remote)], [git_path, 'remote', 'set-url', remote, repo]] @@ -459,9 +476,16 @@ def fetch(git_path, module, repo, dest, version, remote, bare): if bare: refspecs = ['+refs/heads/*:refs/heads/*', '+refs/tags/*:refs/tags/*'] + if refspec: + refspecs.append(refspec) commands.append([fetch_str, [git_path, 'fetch', remote] + refspecs]) else: commands.append([fetch_str, [git_path, 'fetch', '--tags']]) + if refspec: + # unlike in bare mode, there's no way to combine the + # additional refspec with the default git fetch behavior, + # so use two 
commands + commands.append([fetch_str, [git_path, 'fetch', remote, refspec]]) for (label,command) in commands: (rc,out,err) = module.run_command(command, cwd=dest) @@ -579,6 +603,7 @@ def main(): repo=dict(required=True, aliases=['name']), version=dict(default='HEAD'), remote=dict(default='origin'), + refspec=dict(default=None), reference=dict(default=None), force=dict(default='yes', type='bool'), depth=dict(default=None, type='int'), @@ -599,6 +624,7 @@ def main(): repo = module.params['repo'] version = module.params['version'] remote = module.params['remote'] + refspec = module.params['refspec'] force = module.params['force'] depth = module.params['depth'] update = module.params['update'] @@ -656,7 +682,7 @@ def main(): remote_head = get_remote_head(git_path, module, dest, version, repo, bare) module.exit_json(changed=True, before=before, after=remote_head) # there's no git config, so clone - clone(git_path, module, repo, dest, remote, depth, version, bare, reference) + clone(git_path, module, repo, dest, remote, depth, version, bare, reference, refspec) repo_updated = True elif not update: # Just return having found a repo already in the dest path @@ -690,7 +716,7 @@ def main(): if repo_updated is None: if module.check_mode: module.exit_json(changed=True, before=before, after=remote_head) - fetch(git_path, module, repo, dest, version, remote, bare) + fetch(git_path, module, repo, dest, version, remote, bare, refspec) repo_updated = True # switch to version specified regardless of whether From 18c429d016218ea7ab8559f45773b53d39961f87 Mon Sep 17 00:00:00 2001 From: Peter Oliver Date: Wed, 17 Dec 2014 12:44:58 +0000 Subject: [PATCH 047/236] Fix appending to a user's group on Solaris Without this change, you get: AttributeError: 'set' object has no attribute 'extend' Tested on a Solaris 11.2 client with the included Python 2.6.8. 
--- system/user.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/user.py b/system/user.py index 30ae29d30ae..aaeee5683d3 100644 --- a/system/user.py +++ b/system/user.py @@ -1255,7 +1255,7 @@ class SunOS(User): cmd.append('-G') new_groups = groups if self.append: - new_groups.extend(current_groups) + new_groups.update(current_groups) cmd.append(','.join(new_groups)) if self.comment is not None and info[4] != self.comment: From 8a03af66083da993c47a970cde44ab8fc39744b6 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 22 Dec 2014 12:11:49 -0800 Subject: [PATCH 048/236] Fix typo in git refspec code. Change lists to tuples --- source_control/git.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/source_control/git.py b/source_control/git.py index 968ae77bcd1..f67abe32fa2 100644 --- a/source_control/git.py +++ b/source_control/git.py @@ -474,7 +474,7 @@ def get_head_branch(git_path, module, dest, remote, bare=False): def fetch(git_path, module, repo, dest, version, remote, bare, refspec): ''' updates repo from remote sources ''' - commands = [["set a new url %s for %s" % (repo, remote)], [git_path, 'remote', 'set-url', remote, repo]] + commands = [("set a new url %s for %s" % (repo, remote), [git_path, 'remote', 'set-url', remote, repo])] fetch_str = 'download remote objects and refs' @@ -482,14 +482,14 @@ def fetch(git_path, module, repo, dest, version, remote, bare, refspec): refspecs = ['+refs/heads/*:refs/heads/*', '+refs/tags/*:refs/tags/*'] if refspec: refspecs.append(refspec) - commands.append([fetch_str, [git_path, 'fetch', remote] + refspecs]) + commands.append((fetch_str, [git_path, 'fetch', remote] + refspecs)) else: - commands.append([fetch_str, [git_path, 'fetch', '--tags']]) + commands.append((fetch_str, [git_path, 'fetch', '--tags'])) if refspec: # unlike in bare mode, there's no way to combine the # additional refspec with the default git fetch behavior, # so use two commands - 
commands.append([fetch_str, [git_path, 'fetch', remote, refspec]]) + commands.append((fetch_str, [git_path, 'fetch', remote, refspec])) for (label,command) in commands: (rc,out,err) = module.run_command(command, cwd=dest) From 8f6ae92cf88beda287c6c11d8b4127239c3168e0 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 22 Dec 2014 15:08:25 -0800 Subject: [PATCH 049/236] git fetch --tags overwrites normal fetching with git < 1.8.x so do a normal fetch followed by using the refspec format for fetching tags --- source_control/git.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/source_control/git.py b/source_control/git.py index f67abe32fa2..44ebf06487a 100644 --- a/source_control/git.py +++ b/source_control/git.py @@ -484,12 +484,14 @@ def fetch(git_path, module, repo, dest, version, remote, bare, refspec): refspecs.append(refspec) commands.append((fetch_str, [git_path, 'fetch', remote] + refspecs)) else: - commands.append((fetch_str, [git_path, 'fetch', '--tags'])) + # unlike in bare mode, there's no way to combine the + # additional refspec with the default git fetch behavior, + # so use two commands + commands.append((fetch_str, [git_path, 'fetch', remote])) + refspecs = ['+refs/tags/*:refs/tags/*'] if refspec: - # unlike in bare mode, there's no way to combine the - # additional refspec with the default git fetch behavior, - # so use two commands - commands.append((fetch_str, [git_path, 'fetch', remote, refspec])) + refspecs.append(refspec) + commands.append((fetch_str, [git_path, 'fetch', remote] + refspecs)) for (label,command) in commands: (rc,out,err) = module.run_command(command, cwd=dest) From 54214f83b5da2920a05b65535116c01cab7cb617 Mon Sep 17 00:00:00 2001 From: Jeff Gonzalez Date: Mon, 22 Dec 2014 18:22:31 -0600 Subject: [PATCH 050/236] Added ability to use url as key source --- system/authorized_key.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/system/authorized_key.py b/system/authorized_key.py 
index d5792200b8d..898f74b575b 100644 --- a/system/authorized_key.py +++ b/system/authorized_key.py @@ -118,6 +118,7 @@ import os.path import tempfile import re import shlex +import urllib2 class keydict(dict): @@ -333,6 +334,14 @@ def enforce_state(module, params): state = params.get("state", "present") key_options = params.get("key_options", None) + if key.startswith("http"): + try: + gh_key = urllib2.urlopen(key).read() + except urllib2.URLError, e: + module.fail_json(msg="no key found at: %s" % key) + + key = gh_key + # extract individual keys into an array, skipping blank lines and comments key = [s for s in key.splitlines() if s and not s.startswith('#')] From b894bc2b771f8d87acb4b1e01aef713551941e28 Mon Sep 17 00:00:00 2001 From: "Michael J. Schultz" Date: Tue, 23 Dec 2014 14:16:29 -0600 Subject: [PATCH 051/236] Build the db connection on `"postgres"` instead of `"template1"` According to the postgresql docs[1], you should not have a connection with `"template1"` when copying multiple databases. 
[1]: http://www.postgresql.org/docs/9.1/static/manage-ag-templatedbs.html --- database/postgresql/postgresql_db.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/database/postgresql/postgresql_db.py b/database/postgresql/postgresql_db.py index 941644d6fb1..4ce8e146ccd 100644 --- a/database/postgresql/postgresql_db.py +++ b/database/postgresql/postgresql_db.py @@ -275,7 +275,7 @@ def main(): kw["host"] = module.params["login_unix_socket"] try: - db_connection = psycopg2.connect(database="template1", **kw) + db_connection = psycopg2.connect(database="postgres", **kw) # Enable autocommit so we can create databases if psycopg2.__version__ >= '2.4.2': db_connection.autocommit = True From 82601fdc546bf9de70c71ccaf5ac323f918168f3 Mon Sep 17 00:00:00 2001 From: Jan Weitz Date: Wed, 24 Dec 2014 03:04:04 +0100 Subject: [PATCH 052/236] Fixes version check for docker-py --- cloud/docker/docker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index f53819f2679..00c805b8f85 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -443,7 +443,7 @@ def get_docker_py_versioninfo(): # than 0.3.0 so it's okay to lie here. version = (0,) - return version + return tuple(version) def check_dependencies(module): """ From ebf9b8c6e289024f46d18ed0cd567fac9156ac83 Mon Sep 17 00:00:00 2001 From: Jan Weitz Date: Wed, 24 Dec 2014 04:03:22 +0100 Subject: [PATCH 053/236] Fixes invalid dictionary access. 
--- cloud/docker/docker.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 00c805b8f85..03bf8a4af03 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -726,6 +726,8 @@ class DockerManager(object): 'name': self.module.params.get('name'), 'stdin_open': self.module.params.get('stdin_open'), 'tty': self.module.params.get('tty'), + 'dns': self.module.params.get('dns'), + 'volumes_from': self.module.params.get('volumes_from'), } if params['dns'] is not None: @@ -783,7 +785,7 @@ class DockerManager(object): 'network_mode': self.module.params.get('net'), } - optionals = [] + optionals = {} for optional_param in ('dns', 'volumes_from', 'restart_policy', 'restart_policy_retry'): optionals[optional_param] = self.module.params.get(optional_param) From 8ede9de895264d95dfd3cf977dc1281c7cac0efd Mon Sep 17 00:00:00 2001 From: Mark Phillips Date: Wed, 24 Dec 2014 12:39:48 +0000 Subject: [PATCH 054/236] vsphere_guest fix for KeyError: folder message --- cloud/vmware/vsphere_guest.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index 86cc9f00fa7..817421011d2 100644 --- a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -684,7 +684,7 @@ def create_vm(vsphere_client, module, esxi, resource_pool, cluster_name, guest, hfmor = dcprops.hostFolder._obj # virtualmachineFolder managed object reference - if vm_extra_config['folder']: + if vm_extra_config.get('folder'): if vm_extra_config['folder'] not in vsphere_client._get_managed_objects(MORTypes.Folder).values(): vsphere_client.disconnect() module.fail_json(msg="Cannot find folder named: %s" % vm_extra_config['folder']) From 55b85ddc46b8cff5d6756ce4d4c1ebdffba419bc Mon Sep 17 00:00:00 2001 From: FabioBatSilva Date: Tue, 23 Dec 2014 19:54:25 -0500 Subject: [PATCH 055/236] fix compatibility issues with python-apt < 0.7.9 --- packaging/os/apt.py | 7 
++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/packaging/os/apt.py b/packaging/os/apt.py index 6e2f26f7237..d5ae62d5320 100644 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -206,7 +206,12 @@ def package_status(m, pkgname, version, cache, state): package_is_installed = pkg.isInstalled if version: - avail_upgrades = fnmatch.filter((p.version for p in pkg.versions), version) + try: + avail_upgrades = fnmatch.filter((p.version for p in pkg.versions), version) + except AttributeError: + # assume older version of python-apt is installed + # apt.package.Package#versions require python-apt >= 0.7.9. + avail_upgrades = [] if package_is_installed: try: From a08165ca2e05808555077023ffaa1cd4a1499e48 Mon Sep 17 00:00:00 2001 From: fabios Date: Wed, 24 Dec 2014 17:55:44 -0500 Subject: [PATCH 056/236] use low-level apt_pkg.Package --- packaging/os/apt.py | 33 ++++++++++++++++++++++++++------- 1 file changed, 26 insertions(+), 7 deletions(-) diff --git a/packaging/os/apt.py b/packaging/os/apt.py index d5ae62d5320..e04b426fa86 100644 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -173,6 +173,29 @@ def package_split(pkgspec): else: return parts[0], None +def package_versions(pkgname, pkg, pkg_cache): + versions = {} + + try: + for p in pkg.versions: + versions[p.version] = p.version + except AttributeError: + # assume older version of python-apt is installed + # apt.package.Package#versions require python-apt >= 0.7.9. 
+ pkg_cache_list = filter(lambda p: p.Name == pkgname, pkg_cache.Packages) + + for pkg_cache in pkg_cache_list: + for p in pkg_cache.VersionList: + versions[p.VerStr] = p.VerStr + + return versions + +def package_version_compare(version, other_version): + try: + return apt_pkg.version_compare(version, other_version) + except AttributeError: + return apt_pkg.VersionCompare(version, other_version) + def package_status(m, pkgname, version, cache, state): try: # get the package from the cache, as well as the @@ -206,12 +229,8 @@ def package_status(m, pkgname, version, cache, state): package_is_installed = pkg.isInstalled if version: - try: - avail_upgrades = fnmatch.filter((p.version for p in pkg.versions), version) - except AttributeError: - # assume older version of python-apt is installed - # apt.package.Package#versions require python-apt >= 0.7.9. - avail_upgrades = [] + versions = package_versions(pkgname, pkg, cache._cache) + avail_upgrades = fnmatch.filter(versions, version) if package_is_installed: try: @@ -225,7 +244,7 @@ def package_status(m, pkgname, version, cache, state): # Only claim the package is upgradable if a candidate matches the version package_is_upgradable = False for candidate in avail_upgrades: - if pkg.versions[candidate] > pkg.installed: + if package_version_compare(versions[candidate], installed_version) > 0: package_is_upgradable = True break else: From 170457413dd179c3154a4184cbe12ad1ab14c86e Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 25 Dec 2014 00:25:02 -0800 Subject: [PATCH 057/236] Cleanup the old apt compatibility changes --- packaging/os/apt.py | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/packaging/os/apt.py b/packaging/os/apt.py index e04b426fa86..43a7d6b390b 100644 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -174,19 +174,14 @@ def package_split(pkgspec): return parts[0], None def package_versions(pkgname, pkg, pkg_cache): - versions = {} - try: - for p in 
pkg.versions: - versions[p.version] = p.version + versions = set(p.version for p in pkg.versions) except AttributeError: # assume older version of python-apt is installed # apt.package.Package#versions require python-apt >= 0.7.9. - pkg_cache_list = filter(lambda p: p.Name == pkgname, pkg_cache.Packages) - - for pkg_cache in pkg_cache_list: - for p in pkg_cache.VersionList: - versions[p.VerStr] = p.VerStr + pkg_cache_list = (p for p in pkg_cache.Packages if p.Name == pkgname) + pkg_versions = (p.VersionList for p in pkg_cache_list) + versions = set(p.VerStr for p in pkg_versions) return versions @@ -244,7 +239,7 @@ def package_status(m, pkgname, version, cache, state): # Only claim the package is upgradable if a candidate matches the version package_is_upgradable = False for candidate in avail_upgrades: - if package_version_compare(versions[candidate], installed_version) > 0: + if package_version_compare(candidate, installed_version) > 0: package_is_upgradable = True break else: From 3665c92856e0b9293895a018ed596aab8f680cf4 Mon Sep 17 00:00:00 2001 From: Lorin Hochstein Date: Thu, 25 Dec 2014 23:31:06 -0500 Subject: [PATCH 058/236] docker: fix volume[s]_from typo Code makes reference to volume_from instead of volumes_from. If volumes_from is passed as an argument, generates a KeyError. 
--- cloud/docker/docker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 03bf8a4af03..f71bad42e79 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -467,7 +467,7 @@ class DockerManager(object): # server APIVersion is passed to a docker-py function that takes strings _cap_ver_req = { 'dns': ((0, 3, 0), '1.10'), - 'volume_from': ((0, 3, 0), '1.10'), + 'volumes_from': ((0, 3, 0), '1.10'), 'restart_policy': ((0, 5, 0), '1.14'), # Clientside only 'insecure_registry': ((0, 5, 0), '0.0') From 50011f13d65c2d0a74a1e2ffc822bf13069fcc4d Mon Sep 17 00:00:00 2001 From: Johnny Robeson Date: Fri, 26 Dec 2014 00:02:36 -0500 Subject: [PATCH 059/236] [hostname] rename FedoraStratgy to SystemdStrategy --- system/hostname.py | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/system/hostname.py b/system/hostname.py index cd5738b43d5..2ca7479829b 100644 --- a/system/hostname.py +++ b/system/hostname.py @@ -28,6 +28,7 @@ requirements: [ hostname ] description: - Set system's hostname - Currently implemented on Debian, Ubuntu, Fedora, RedHat, openSUSE, Linaro, ScientificLinux, Arch, CentOS, AMI. + - Any distribution that uses systemd as their init system options: name: required: true @@ -232,9 +233,9 @@ class RedHatStrategy(GenericStrategy): # =========================================== -class FedoraStrategy(GenericStrategy): +class SystemdStrategy(GenericStrategy): """ - This is a Fedora family Hostname manipulation strategy class - it uses + This is a Systemd hostname manipulation strategy class - it uses the hostnamectl command. 
""" @@ -323,17 +324,17 @@ class OpenRCStrategy(GenericStrategy): class FedoraHostname(Hostname): platform = 'Linux' distribution = 'Fedora' - strategy_class = FedoraStrategy + strategy_class = SystemdStrategy class OpenSUSEHostname(Hostname): platform = 'Linux' distribution = 'Opensuse ' - strategy_class = FedoraStrategy + strategy_class = SystemdStrategy class ArchHostname(Hostname): platform = 'Linux' distribution = 'Arch' - strategy_class = FedoraStrategy + strategy_class = SystemdStrategy class RedHat5Hostname(Hostname): platform = 'Linux' @@ -345,7 +346,7 @@ class RedHatServerHostname(Hostname): distribution = 'Red hat enterprise linux server' distribution_version = get_distribution_version() if distribution_version and LooseVersion(distribution_version) >= LooseVersion("7"): - strategy_class = FedoraStrategy + strategy_class = SystemdStrategy else: strategy_class = RedHatStrategy @@ -354,7 +355,7 @@ class RedHatWorkstationHostname(Hostname): distribution = 'Red hat enterprise linux workstation' distribution_version = get_distribution_version() if distribution_version and LooseVersion(distribution_version) >= LooseVersion("7"): - strategy_class = FedoraStrategy + strategy_class = SystemdStrategy else: strategy_class = RedHatStrategy @@ -363,7 +364,7 @@ class CentOSHostname(Hostname): distribution = 'Centos' distribution_version = get_distribution_version() if distribution_version and LooseVersion(distribution_version) >= LooseVersion("7"): - strategy_class = FedoraStrategy + strategy_class = SystemdStrategy else: strategy_class = RedHatStrategy @@ -372,7 +373,7 @@ class CentOSLinuxHostname(Hostname): distribution = 'Centos linux' distribution_version = get_distribution_version() if distribution_version and LooseVersion(distribution_version) >= LooseVersion("7"): - strategy_class = FedoraStrategy + strategy_class = SystemdStrategy else: strategy_class = RedHatStrategy From 6fce4a9c3dac5704eeba2e17b4bf023f930542c2 Mon Sep 17 00:00:00 2001 From: fabios Date: Thu, 
25 Dec 2014 14:14:01 -0500 Subject: [PATCH 060/236] handle list of list for python-apt < 0.7.9 compatibility --- packaging/os/apt.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/packaging/os/apt.py b/packaging/os/apt.py index 43a7d6b390b..7eba3432e60 100644 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -180,7 +180,8 @@ def package_versions(pkgname, pkg, pkg_cache): # assume older version of python-apt is installed # apt.package.Package#versions require python-apt >= 0.7.9. pkg_cache_list = (p for p in pkg_cache.Packages if p.Name == pkgname) - pkg_versions = (p.VersionList for p in pkg_cache_list) + pkg_list_of_lists = (p.VersionList for p in pkg_cache_list) + pkg_versions = (p for l in pkg_list_of_lists for p in l) versions = set(p.VerStr for p in pkg_versions) return versions From d19f7c702613b73154c40b40ebf6bdcd06745615 Mon Sep 17 00:00:00 2001 From: fabios Date: Sat, 27 Dec 2014 15:30:56 -0500 Subject: [PATCH 061/236] older python-apt compatibility --- packaging/os/apt.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/packaging/os/apt.py b/packaging/os/apt.py index 7eba3432e60..d1101bc7b8b 100644 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -202,8 +202,12 @@ def package_status(m, pkgname, version, cache, state): ll_pkg = cache._cache[pkgname] # the low-level package object except KeyError: if state == 'install': - if cache.get_providing_packages(pkgname): - return False, True, False + try: + if cache.get_providing_packages(pkgname): + return False, True, False + except AttributeError: + # older python-apt providing packages cannot be used + pass m.fail_json(msg="No package matching '%s' is available" % pkgname) else: return False, False, False From b747d9411ac346835c4a38dbe3997c72c0e78137 Mon Sep 17 00:00:00 2001 From: fabios Date: Sat, 27 Dec 2014 21:19:00 -0500 Subject: [PATCH 062/236] improve fail message and use itertools chain --- packaging/os/apt.py | 12 ++++++------ 1 file changed, 6 
insertions(+), 6 deletions(-) diff --git a/packaging/os/apt.py b/packaging/os/apt.py index d1101bc7b8b..77f1e431b2f 100644 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -144,6 +144,7 @@ warnings.filterwarnings('ignore', "apt API not stable yet", FutureWarning) import os import datetime import fnmatch +import itertools # APT related constants APT_ENV_VARS = dict( @@ -180,9 +181,8 @@ def package_versions(pkgname, pkg, pkg_cache): # assume older version of python-apt is installed # apt.package.Package#versions require python-apt >= 0.7.9. pkg_cache_list = (p for p in pkg_cache.Packages if p.Name == pkgname) - pkg_list_of_lists = (p.VersionList for p in pkg_cache_list) - pkg_versions = (p for l in pkg_list_of_lists for p in l) - versions = set(p.VerStr for p in pkg_versions) + pkg_versions = (p.VersionList for p in pkg_cache_list) + versions = set(p.VerStr for p in itertools.chain(*pkg_versions)) return versions @@ -205,10 +205,10 @@ def package_status(m, pkgname, version, cache, state): try: if cache.get_providing_packages(pkgname): return False, True, False + m.fail_json(msg="No package matching '%s' is available" % pkgname) except AttributeError: - # older python-apt providing packages cannot be used - pass - m.fail_json(msg="No package matching '%s' is available" % pkgname) + # python-apt version too old to detect virtual packages + m.fail_json(msg="No package matching '%s' is available (python-apt version too old to detect virtual packages)" % pkgname) else: return False, False, False try: From 9ed842e2d0442112f3674dd4ab063e33d70c0254 Mon Sep 17 00:00:00 2001 From: fabios Date: Sun, 28 Dec 2014 12:52:48 -0500 Subject: [PATCH 063/236] mark as upgradable and let apt-get install deal with it --- packaging/os/apt.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/packaging/os/apt.py b/packaging/os/apt.py index 77f1e431b2f..ad1807f9c20 100644 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -208,7 +208,8 @@ def package_status(m, 
pkgname, version, cache, state): m.fail_json(msg="No package matching '%s' is available" % pkgname) except AttributeError: # python-apt version too old to detect virtual packages - m.fail_json(msg="No package matching '%s' is available (python-apt version too old to detect virtual packages)" % pkgname) + # mark as upgradable and let apt-get install deal with it + return False, True, False else: return False, False, False try: From ad7f5abf28b1b637c9b108a586ab7d22f8d031c4 Mon Sep 17 00:00:00 2001 From: Philip Misiowiec Date: Fri, 26 Sep 2014 19:02:25 -0700 Subject: [PATCH 064/236] Ability to detach an EBS volume from an EC2 instance --- cloud/amazon/ec2_vol.py | 31 +++++++++++++++++++++++++------ 1 file changed, 25 insertions(+), 6 deletions(-) diff --git a/cloud/amazon/ec2_vol.py b/cloud/amazon/ec2_vol.py index 7919a9ec47e..050863c22fb 100644 --- a/cloud/amazon/ec2_vol.py +++ b/cloud/amazon/ec2_vol.py @@ -168,16 +168,20 @@ EXAMPLES = ''' id: vol-XXXXXXXX state: absent +# Detach a volume +- ec2_vol: + id: vol-XXXXXXXX + instance: None + # List volumes for an instance - ec2_vol: instance: i-XXXXXX state: list # Create new volume using SSD storage -- local_action: - module: ec2_vol - instance: XXXXXX - volume_size: 50 +- ec2_vol: + instance: XXXXXX + volume_size: 50 volume_type: gp2 device_name: /dev/xvdf ''' @@ -261,15 +265,18 @@ def create_volume(module, ec2, zone): if iops: volume_type = 'io1' + if instance == 'None' or instance == '': + instance = None + # If no instance supplied, try volume creation based on module parameters. 
if name or id: - if not instance: - module.fail_json(msg = "If name or id is specified, instance must also be specified") if iops or volume_size: module.fail_json(msg = "Parameters are not compatible: [id or name] and [iops or volume_size]") volume = get_volume(module, ec2) if volume.attachment_state() is not None: + if instance is None: + return volume adata = volume.attach_data if adata.instance_id != instance: module.fail_json(msg = "Volume %s is already attached to another instance: %s" @@ -331,6 +338,13 @@ def attach_volume(module, ec2, volume, instance): except boto.exception.BotoServerError, e: module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) +def detach_volume(module, ec2): + vol = get_volume(module, ec2) + if not vol or vol.attachment_state() is None: + module.exit_json(changed=False) + else: + vol.detach() + module.exit_json(changed=True) def main(): argument_spec = ec2_argument_spec() @@ -362,6 +376,9 @@ def main(): snapshot = module.params.get('snapshot') state = module.params.get('state') + if instance == 'None' or instance == '': + instance = None + ec2 = ec2_connect(module) if state == 'list': @@ -428,6 +445,8 @@ def main(): volume = create_volume(module, ec2, zone) if instance: attach_volume(module, ec2, volume, inst) + else: + detach_volume(module, ec2) module.exit_json(volume_id=volume.id, device=device_name, volume_type=volume.type) # import module snippets From 71d1044b8b6daf13fdc6b917cf91744500b752a3 Mon Sep 17 00:00:00 2001 From: Philip Misiowiec Date: Tue, 30 Dec 2014 23:37:38 -0800 Subject: [PATCH 065/236] Adds tenancy state to returning json --- cloud/amazon/ec2.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index 050ed0b63f4..9bb0786753d 100644 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -604,6 +604,11 @@ def get_instance_info(inst): except AttributeError: instance_info['ebs_optimized'] = False + try: + instance_info['tenancy'] = getattr(inst, 
'placement_tenancy') + except AttributeError: + instance_info['tenancy'] = 'default' + return instance_info def boto_supports_associate_public_ip_address(ec2): From cfda942376fc7cb6eb3de75cdd8c423afd2deddd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lu=C3=ADs=20Guilherme=20F=2E=20Pereira?= Date: Wed, 1 Oct 2014 11:22:22 -0300 Subject: [PATCH 066/236] Allow ec2_lc to create EC2-Classic Launch Configs Removes default value from ec2_lc so it can create launch configurations valid on a EC2-Classic environment. AWS API will not accept a assign_public_ip when creating an ASG outside of VPC. --- cloud/amazon/ec2_lc.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/cloud/amazon/ec2_lc.py b/cloud/amazon/ec2_lc.py index 193a839c87d..30f532c9e4f 100644 --- a/cloud/amazon/ec2_lc.py +++ b/cloud/amazon/ec2_lc.py @@ -93,7 +93,6 @@ options: description: - Used for Auto Scaling groups that launch instances into an Amazon Virtual Private Cloud. Specifies whether to assign a public IP address to each instance launched in a Amazon VPC. 
required: false - default: false aliases: [] version_added: "1.8" ramdisk_id: @@ -255,7 +254,7 @@ def main(): ebs_optimized=dict(default=False, type='bool'), associate_public_ip_address=dict(type='bool'), instance_monitoring=dict(default=False, type='bool'), - assign_public_ip=dict(default=False, type='bool') + assign_public_ip=dict(type='bool') ) ) From 6237dab4cf3e2442403646825b8f6f9e72e40b7b Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 6 Jan 2015 10:06:50 -0500 Subject: [PATCH 067/236] fix for when state=directory, follow=yes and target is symlink to directory --- files/file.py | 35 +++++++++++++++++++++++------------ 1 file changed, 23 insertions(+), 12 deletions(-) diff --git a/files/file.py b/files/file.py index e154d6ad07f..46185f29215 100644 --- a/files/file.py +++ b/files/file.py @@ -103,6 +103,23 @@ EXAMPLES = ''' ''' + +def get_state(path): + ''' Find out current state ''' + + if os.path.lexists(path): + if os.path.islink(path): + return 'link' + elif os.path.isdir(path): + return 'directory' + elif os.stat(path).st_nlink > 1: + return 'hard' + else: + # could be many other things, but defaulting to file + return 'file' + + return 'absent' + def main(): module = AnsibleModule( @@ -143,18 +160,7 @@ def main(): pass module.exit_json(path=path, changed=False, appears_binary=appears_binary) - # Find out current state - prev_state = 'absent' - if os.path.lexists(path): - if os.path.islink(path): - prev_state = 'link' - elif os.path.isdir(path): - prev_state = 'directory' - elif os.stat(path).st_nlink > 1: - prev_state = 'hard' - else: - # could be many other things, but defaulting to file - prev_state = 'file' + prev_state = get_state(path) # state should default to file, but since that creates many conflicts, # default to 'current' when it exists. 
@@ -220,6 +226,11 @@ def main(): module.exit_json(path=path, changed=changed) elif state == 'directory': + + if follow and prev_state == 'link': + path = os.readlink(path) + prev_state = get_state(path) + if prev_state == 'absent': if module.check_mode: module.exit_json(changed=True) From c1eb7a4c1d41556c53dbcca597ca78ab964bca0d Mon Sep 17 00:00:00 2001 From: Vasyl Kaigorodov Date: Tue, 6 Jan 2015 20:44:09 +0100 Subject: [PATCH 068/236] gce_net - creating firewall rule, src_range value seems to get lost or set to empty string -- fixes #252 --- cloud/google/gce_net.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/google/gce_net.py b/cloud/google/gce_net.py index 102a73f2bd1..10592d20033 100644 --- a/cloud/google/gce_net.py +++ b/cloud/google/gce_net.py @@ -156,7 +156,7 @@ def main(): ipv4_range = dict(), fwname = dict(), name = dict(), - src_range = dict(), + src_range = dict(type='list'), src_tags = dict(type='list'), state = dict(default='present'), service_account_email = dict(), From 53404c786aa45e88cfd572ada353c921b8d367ec Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 6 Jan 2015 16:06:45 -0500 Subject: [PATCH 069/236] added version added for new template options --- cloud/vmware/vsphere_guest.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index 817421011d2..8ad7df41dea 100644 --- a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -69,11 +69,13 @@ options: default: present choices: ['present', 'powered_off', 'absent', 'powered_on', 'restarted', 'reconfigured'] from_template: + version_added: "1.9" description: - Specifies if the VM should be deployed from a template (cannot be ran with state) default: no choices: ['yes', 'no'] template_src: + version_added: "1.9" description: - Name of the source template to deploy from default: None From 2973bac72f04454014b0d4453de6065e68aa74c1 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 7 Jan 
2015 08:45:55 -0800 Subject: [PATCH 070/236] Clarify documented behaviour of user module's ssh_key_file parameter Fixes #9873 --- system/user.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/system/user.py b/system/user.py index aaeee5683d3..62dfb271e2d 100644 --- a/system/user.py +++ b/system/user.py @@ -153,10 +153,11 @@ options: present on target host. ssh_key_file: required: false - default: $HOME/.ssh/id_rsa + default: .ssh/id_rsa version_added: "0.9" description: - - Optionally specify the SSH key filename. + - Optionally specify the SSH key filename. If this is a relative + filename then it will be relative to the user's home directory. ssh_key_comment: required: false default: ansible-generated @@ -189,8 +190,8 @@ EXAMPLES = ''' # Remove the user 'johnd' - user: name=johnd state=absent remove=yes -# Create a 2048-bit SSH key for user jsmith -- user: name=jsmith generate_ssh_key=yes ssh_key_bits=2048 +# Create a 2048-bit SSH key for user jsmith in ~jsmith/.ssh/id_rsa +- user: name=jsmith generate_ssh_key=yes ssh_key_bits=2048 ssh_key_file=.ssh/id_rsa ''' import os From 78cacd0c2270feae0c03624f2e784fca3985c865 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 7 Jan 2015 13:16:53 -0500 Subject: [PATCH 071/236] fix for allowing permissions on hard links and soft links + follow=yes --- files/file.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/files/file.py b/files/file.py index 002776fd6ff..35bb52ab1e3 100644 --- a/files/file.py +++ b/files/file.py @@ -218,7 +218,15 @@ def main(): module.exit_json(path=path, changed=False) elif state == 'file': + if state != prev_state: + if follow and prev_state == 'link': + # follow symlink and operate on original + path = os.readlink(path) + prev_state = get_state(path) + file_args['path'] = path + + if prev_state not in ['file','hard']: # file is not absent and any other state is a conflict module.fail_json(path=path, msg='file (%s) is %s, cannot continue' % (path, prev_state)) 
From 242aa9f81b4a54fe4cf4da3c84e802e83c254b2c Mon Sep 17 00:00:00 2001 From: James Martin Date: Tue, 6 Jan 2015 14:04:03 -0500 Subject: [PATCH 072/236] vpc_zone_identifier must be a csv string when an asg is updated. --- cloud/amazon/ec2_asg.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cloud/amazon/ec2_asg.py b/cloud/amazon/ec2_asg.py index 8c9661706b0..6e5d3508cb8 100644 --- a/cloud/amazon/ec2_asg.py +++ b/cloud/amazon/ec2_asg.py @@ -326,6 +326,8 @@ def create_autoscaling_group(connection, module): for attr in ASG_ATTRIBUTES: if module.params.get(attr): module_attr = module.params.get(attr) + if attr == 'vpc_zone_identifier': + module_attr = ','.join(module_attr) group_attr = getattr(as_group, attr) # we do this because AWS and the module may return the same list # sorted differently From a07873d6a39dee7a26cae07b1e4619660a17f5db Mon Sep 17 00:00:00 2001 From: Bruce Pennypacker Date: Thu, 8 Jan 2015 16:26:22 +0000 Subject: [PATCH 073/236] Added support for 'REQUIRE SSL' grant option --- database/mysql/mysql_user.py | 29 ++++++++++++++++++++++++----- 1 file changed, 24 insertions(+), 5 deletions(-) diff --git a/database/mysql/mysql_user.py b/database/mysql/mysql_user.py index e160fcb68f6..68d6f031490 100644 --- a/database/mysql/mysql_user.py +++ b/database/mysql/mysql_user.py @@ -117,6 +117,9 @@ EXAMPLES = """ # Creates database user 'bob' and password '12345' with all database privileges and 'WITH GRANT OPTION' - mysql_user: name=bob password=12345 priv=*.*:ALL,GRANT state=present +# Modifiy user Bob to require SSL connections. Note that REQUIRESSL is a special privilege that should only apply to *.* by itself. +- mysql_user: name=bob append=true priv=*.*:REQUIRESSL state=present + # Ensure no user named 'sally' exists, also passing in the auth credentials. 
- mysql_user: login_user=root login_password=123456 name=sally state=absent @@ -159,7 +162,7 @@ VALID_PRIVS = frozenset(('CREATE', 'DROP', 'GRANT', 'GRANT OPTION', 'EXECUTE', 'FILE', 'CREATE TABLESPACE', 'CREATE USER', 'PROCESS', 'PROXY', 'RELOAD', 'REPLICATION CLIENT', 'REPLICATION SLAVE', 'SHOW DATABASES', 'SHUTDOWN', - 'SUPER', 'ALL', 'ALL PRIVILEGES', 'USAGE',)) + 'SUPER', 'ALL', 'ALL PRIVILEGES', 'USAGE', 'REQUIRESSL')) class InvalidPrivsError(Exception): pass @@ -261,6 +264,8 @@ def privileges_get(cursor, user,host): privileges = [ pick(x) for x in privileges] if "WITH GRANT OPTION" in res.group(4): privileges.append('GRANT') + if "REQUIRE SSL" in res.group(4): + privileges.append('REQUIRESSL') db = res.group(2) output[db] = privileges return output @@ -294,6 +299,11 @@ def privileges_unpack(priv): if '*.*' not in output: output['*.*'] = ['USAGE'] + # if we are only specifying something like REQUIRESSL in *.* we still need + # to add USAGE as a privilege to avoid syntax errors + if priv.find('REQUIRESSL') != -1 and 'USAGE' not in output['*.*']: + output['*.*'].append('USAGE') + return output def privileges_revoke(cursor, user,host,db_table,grant_option): @@ -307,19 +317,28 @@ def privileges_revoke(cursor, user,host,db_table,grant_option): query = ["REVOKE ALL PRIVILEGES ON %s" % mysql_quote_identifier(db_table, 'table')] query.append("FROM %s@%s") query = ' '.join(query) - cursor.execute(query, (user, host)) + try: + cursor.execute(query, (user, host)) + except Exception, e: + raise Exception("%s. 
Query=\"%s\"" % (str(e), query % (user, host))) def privileges_grant(cursor, user,host,db_table,priv): # Escape '%' since mysql db.execute uses a format string and the # specification of db and table often use a % (SQL wildcard) db_table = db_table.replace('%', '%%') - priv_string = ",".join(filter(lambda x: x != 'GRANT', priv)) + priv_string = ",".join(filter(lambda x: x not in [ 'GRANT', 'REQUIRESSL' ], priv)) query = ["GRANT %s ON %s" % (priv_string, mysql_quote_identifier(db_table, 'table'))] query.append("TO %s@%s") if 'GRANT' in priv: - query.append("WITH GRANT OPTION") + query.append(" WITH GRANT OPTION") + if 'REQUIRESSL' in priv: + query.append(" REQUIRE SSL") query = ' '.join(query) - cursor.execute(query, (user, host)) + try: + cursor.execute(query, (user, host)) + except Exception, e: + raise Exception("%s. Query=\"%s\"" % (str(e), query % (user, host))) + def strip_quotes(s): From 272bb1fa63cc869e36b4830b8094195ba4999297 Mon Sep 17 00:00:00 2001 From: Bruce Pennypacker Date: Thu, 8 Jan 2015 21:41:15 +0000 Subject: [PATCH 074/236] requested changes --- database/mysql/mysql_user.py | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) diff --git a/database/mysql/mysql_user.py b/database/mysql/mysql_user.py index 68d6f031490..7d4777fb831 100644 --- a/database/mysql/mysql_user.py +++ b/database/mysql/mysql_user.py @@ -317,10 +317,7 @@ def privileges_revoke(cursor, user,host,db_table,grant_option): query = ["REVOKE ALL PRIVILEGES ON %s" % mysql_quote_identifier(db_table, 'table')] query.append("FROM %s@%s") query = ' '.join(query) - try: - cursor.execute(query, (user, host)) - except Exception, e: - raise Exception("%s. 
Query=\"%s\"" % (str(e), query % (user, host))) + cursor.execute(query, (user, host)) def privileges_grant(cursor, user,host,db_table,priv): # Escape '%' since mysql db.execute uses a format string and the @@ -330,16 +327,11 @@ def privileges_grant(cursor, user,host,db_table,priv): query = ["GRANT %s ON %s" % (priv_string, mysql_quote_identifier(db_table, 'table'))] query.append("TO %s@%s") if 'GRANT' in priv: - query.append(" WITH GRANT OPTION") + query.append("WITH GRANT OPTION") if 'REQUIRESSL' in priv: - query.append(" REQUIRE SSL") + query.append("REQUIRE SSL") query = ' '.join(query) - try: - cursor.execute(query, (user, host)) - except Exception, e: - raise Exception("%s. Query=\"%s\"" % (str(e), query % (user, host))) - - + cursor.execute(query, (user, host)) def strip_quotes(s): """ Remove surrounding single or double quotes From 0b2d190f7243d702b471d46d0aa1151fd5384869 Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Sun, 11 Jan 2015 04:27:10 +0100 Subject: [PATCH 075/236] Use the rpm python module rather than execing rpm Using the rpm module prevent a uneeded fork, and permit to skip the signature checking which slow down a bit the operation, and which would be done by yum on installation anyway. 
--- packaging/os/yum.py | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/packaging/os/yum.py b/packaging/os/yum.py index 73fbb699e75..b652fd344ab 100644 --- a/packaging/os/yum.py +++ b/packaging/os/yum.py @@ -25,6 +25,7 @@ import traceback import os import yum +import rpm try: from yum.misc import find_unfinished_transactions, find_ts_remaining @@ -108,7 +109,7 @@ options: notes: [] # informational: requirements for nodes -requirements: [ yum, rpm ] +requirements: [ yum ] author: Seth Vidal ''' @@ -405,14 +406,19 @@ def transaction_exists(pkglist): def local_nvra(module, path): """return nvra of a local rpm passed in""" - - cmd = ['/bin/rpm', '-qp' ,'--qf', - '%{name}-%{version}-%{release}.%{arch}\n', path ] - rc, out, err = module.run_command(cmd) - if rc != 0: - return None - nvra = out.split('\n')[0] - return nvra + + ts = rpm.TransactionSet() + ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES) + fd = os.open(path, os.O_RDONLY) + try: + header = ts.hdrFromFdno(fd) + finally: + os.close(fd) + + return '%s-%s-%s.%s' % (header[rpm.RPMTAG_NAME], + header[rpm.RPMTAG_VERSION], + header[rpm.RPMTAG_RELEASE], + header[rpm.RPMTAG_ARCH]) def pkg_to_dict(pkgstr): From bcfba0c05098696b6e770335870a9c22792fec38 Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Mon, 12 Jan 2015 01:02:29 +0100 Subject: [PATCH 076/236] Remove unused code There is no call to yum_base using 'cachedir' argument, so while it work fine from a cursory look, that's useless code, and so should be removed to clarify the code. 
--- packaging/os/yum.py | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/packaging/os/yum.py b/packaging/os/yum.py index 73fbb699e75..65d5b43b07c 100644 --- a/packaging/os/yum.py +++ b/packaging/os/yum.py @@ -149,21 +149,13 @@ def log(msg): syslog.openlog('ansible-yum', 0, syslog.LOG_USER) syslog.syslog(syslog.LOG_NOTICE, msg) -def yum_base(conf_file=None, cachedir=False): +def yum_base(conf_file=None): my = yum.YumBase() my.preconf.debuglevel=0 my.preconf.errorlevel=0 if conf_file and os.path.exists(conf_file): my.preconf.fn = conf_file - if cachedir or os.geteuid() != 0: - if hasattr(my, 'setCacheDir'): - my.setCacheDir() - else: - cachedir = yum.misc.getCacheDir() - my.repos.setCacheDir(cachedir) - my.conf.cache = 0 - return my def install_yum_utils(module): From 826d313aada2157742ba327a40ffa3749739c1a0 Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Mon, 12 Jan 2015 19:08:22 +0100 Subject: [PATCH 077/236] Do not use echo shell builtin for password Using \t in a password may result in a different password being set : $ echo 'a\ta' a a Problem report originally found by Pilou- ( https://github.com/ansible/ansible-modules-extras/pull/198 ) --- system/user.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/system/user.py b/system/user.py index 62dfb271e2d..e1fa7f203aa 100644 --- a/system/user.py +++ b/system/user.py @@ -263,12 +263,12 @@ class User(object): # select whether we dump additional debug info through syslog self.syslogging = False - def execute_command(self, cmd, use_unsafe_shell=False): + def execute_command(self, cmd, use_unsafe_shell=False, data=None): if self.syslogging: syslog.openlog('ansible-%s' % os.path.basename(__file__)) syslog.syslog(syslog.LOG_NOTICE, 'Command %s' % '|'.join(cmd)) - return self.module.run_command(cmd, use_unsafe_shell=use_unsafe_shell) + return self.module.run_command(cmd, use_unsafe_shell=use_unsafe_shell, data=data) def remove_user_userdel(self): cmd = 
[self.module.get_bin_path('userdel', True)] @@ -1368,11 +1368,10 @@ class AIX(User): # set password with chpasswd if self.password is not None: cmd = [] - cmd.append('echo \''+self.name+':'+self.password+'\' |') cmd.append(self.module.get_bin_path('chpasswd', True)) cmd.append('-e') cmd.append('-c') - self.execute_command(' '.join(cmd), use_unsafe_shell=True) + self.execute_command(' '.join(cmd), data="%s:%s" % (self.name, self.password)) return (rc, out, err) @@ -1444,11 +1443,10 @@ class AIX(User): # set password with chpasswd if self.update_password == 'always' and self.password is not None and info[1] != self.password: cmd = [] - cmd.append('echo \''+self.name+':'+self.password+'\' |') cmd.append(self.module.get_bin_path('chpasswd', True)) cmd.append('-e') cmd.append('-c') - (rc2, out2, err2) = self.execute_command(' '.join(cmd), use_unsafe_shell=True) + (rc2, out2, err2) = self.execute_command(' '.join(cmd), data="%s:%s" % (self.name, self.password)) else: (rc2, out2, err2) = (None, '', '') From fbb9dcc69a33a2051502ef3cb1a43b3e5a97a2d7 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 12 Jan 2015 14:36:57 -0800 Subject: [PATCH 078/236] Also catch mysql errors so we can give the error message back through json rather than tracebacking --- database/mysql/mysql_user.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/database/mysql/mysql_user.py b/database/mysql/mysql_user.py index 7d4777fb831..3590fb8e640 100644 --- a/database/mysql/mysql_user.py +++ b/database/mysql/mysql_user.py @@ -498,16 +498,14 @@ def main(): if user_exists(cursor, user, host): try: changed = user_mod(cursor, user, host, password, priv, append_privs) - except SQLParseError, e: - module.fail_json(msg=str(e)) - except InvalidPrivsError, e: + except (SQLParseError, InvalidPrivsError, MySQLdb.Error), e: module.fail_json(msg=str(e)) else: if password is None: module.fail_json(msg="password parameter required when adding a user") try: changed = user_add(cursor, 
user, host, password, priv) - except SQLParseError, e: + except (SQLParseError, InvalidPrivsError, MySQLdb.Error), e: module.fail_json(msg=str(e)) elif state == "absent": if user_exists(cursor, user, host): From d2ae2e6cc652ce082dd3b8522608407e12d26146 Mon Sep 17 00:00:00 2001 From: Alex Clifford Date: Wed, 14 Jan 2015 12:12:02 +1100 Subject: [PATCH 079/236] Fix slight typo in doco --- cloud/amazon/rds_subnet_group.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/amazon/rds_subnet_group.py b/cloud/amazon/rds_subnet_group.py index bba6cd86872..9731154f77c 100644 --- a/cloud/amazon/rds_subnet_group.py +++ b/cloud/amazon/rds_subnet_group.py @@ -79,8 +79,8 @@ EXAMPLES = ''' - subnet-aaaaaaaa - subnet-bbbbbbbb -# Remove a parameter group -- rds_param_group: +# Remove a subnet group +- rds_subnet_group: state: absent name: norwegian-blue ''' From 68b5b7be72fe3387f5fae954d391802944f07c40 Mon Sep 17 00:00:00 2001 From: Tim G Date: Wed, 14 Jan 2015 13:15:38 +1000 Subject: [PATCH 080/236] distutils is not available on some non-Linux OS's --- system/service.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/system/service.py b/system/service.py index 275bac900a9..fea5722710e 100644 --- a/system/service.py +++ b/system/service.py @@ -106,7 +106,8 @@ import select import time import string -from distutils.version import LooseVersion +if platform.system() == 'Linux': + from distutils.version import LooseVersion class Service(object): """ From 6cbce4d911b62a6135bcfcbb6da9eb3e497c0fbb Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 13 Jan 2015 22:39:20 -0800 Subject: [PATCH 081/236] Potential fix for 640 --- cloud/amazon/elasticache.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cloud/amazon/elasticache.py b/cloud/amazon/elasticache.py index c1846f525a8..4e76d593cc9 100644 --- a/cloud/amazon/elasticache.py +++ b/cloud/amazon/elasticache.py @@ -357,7 +357,9 @@ class ElastiCacheManager(object): 
'modifying': 'available', 'deleting': 'gone' } - + if self.status == awaited_status: + # No need to wait, we're already done + return if status_map[self.status] != awaited_status: msg = "Invalid awaited status. '%s' cannot transition to '%s'" self.module.fail_json(msg=msg % (self.status, awaited_status)) From 477391bb24322629b9c10a342415a66f0bcef7b3 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 13 Jan 2015 23:18:04 -0800 Subject: [PATCH 082/236] Better error messages if a2enmod/a2dismod are not found --- web_infrastructure/apache2_module.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/web_infrastructure/apache2_module.py b/web_infrastructure/apache2_module.py index 39351482087..bd6de56aed2 100644 --- a/web_infrastructure/apache2_module.py +++ b/web_infrastructure/apache2_module.py @@ -49,6 +49,9 @@ import re def _disable_module(module): name = module.params['name'] a2dismod_binary = module.get_bin_path("a2dismod") + if a2dismod_binary is None: + module.fail_json(msg="a2dismod not found. Perhaps this system does not use a2dismod to manage apache") + result, stdout, stderr = module.run_command("%s %s" % (a2dismod_binary, name)) if re.match(r'.*' + name + r' already disabled.*', stdout, re.S): @@ -61,6 +64,9 @@ def _disable_module(module): def _enable_module(module): name = module.params['name'] a2enmod_binary = module.get_bin_path("a2enmod") + if a2enmod_binary is None: + module.fail_json(msg="a2enmod not found. 
Perhaps this system does not use a2enmod to manage apache") + result, stdout, stderr = module.run_command("%s %s" % (a2enmod_binary, name)) if re.match(r'.*' + name + r' already enabled.*', stdout, re.S): @@ -86,4 +92,5 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() From 6159b5c4235b427cd6553a2d8d99d4fff12bc805 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 14 Jan 2015 13:10:13 -0800 Subject: [PATCH 083/236] Implement user,group,mode,selinux settings for unarchive. This is a partial fix for #234. Still have to figure out how to make change reporting work as we can no longer rely on tar's --compare option --- files/unarchive.py | 91 +++++++++++++++++++++++++++++++--------------- 1 file changed, 62 insertions(+), 29 deletions(-) diff --git a/files/unarchive.py b/files/unarchive.py index f46e52e02a3..c567cfc3d8a 100644 --- a/files/unarchive.py +++ b/files/unarchive.py @@ -76,16 +76,33 @@ EXAMPLES = ''' ''' import os +from zipfile import ZipFile +class UnarchiveError(Exception): + pass # class to handle .zip files class ZipFile(object): - + def __init__(self, src, dest, module): self.src = src self.dest = dest self.module = module self.cmd_path = self.module.get_bin_path('unzip') + self._files_in_archive = [] + + @property + def files_in_archive(self, force_refresh=False): + if self._files_in_archive and not force_refresh: + return self._files_in_archive + + archive = ZipFile(self.src) + try: + self._files_in_archive = archive.namelist() + except: + raise UnarchiveError('Unable to list files in the archive') + + return self._files_in_archive def is_unarchived(self): return dict(unarchived=False) @@ -107,13 +124,29 @@ class ZipFile(object): # class to handle gzipped tar files class TgzFile(object): - + def __init__(self, src, dest, module): self.src = src self.dest = dest self.module = module self.cmd_path = self.module.get_bin_path('tar') self.zipflag = 'z' + 
self._files_in_archive = [] + + @property + def files_in_archive(self, force_refresh=False): + if self._files_in_archive and not force_refresh: + return self._files_in_archive + + cmd = '%s -t%sf "%s"' % (self.cmd_path, self.zipflag, self.src) + rc, out, err = self.module.run_command(cmd) + if rc != 0: + raise UnarchiveError('Unable to list files in the archive') + + for filename in out.splitlines(): + if filename: + self._files_in_archive.append(filename) + return self._files_in_archive def is_unarchived(self): cmd = '%s -v -C "%s" --diff -%sf "%s"' % (self.cmd_path, self.dest, self.zipflag, self.src) @@ -129,41 +162,35 @@ class TgzFile(object): def can_handle_archive(self): if not self.cmd_path: return False - cmd = '%s -t%sf "%s"' % (self.cmd_path, self.zipflag, self.src) - rc, out, err = self.module.run_command(cmd) - if rc == 0: - if len(out.splitlines(True)) > 0: + + try: + if self.files_in_archive: return True + except UnarchiveError: + pass + # Errors and no files in archive assume that we weren't able to + # properly unarchive it return False # class to handle tar files that aren't compressed class TarFile(TgzFile): def __init__(self, src, dest, module): - self.src = src - self.dest = dest - self.module = module - self.cmd_path = self.module.get_bin_path('tar') + super(TarFile, self).__init__(src, dest, module) self.zipflag = '' # class to handle bzip2 compressed tar files class TarBzip(TgzFile): def __init__(self, src, dest, module): - self.src = src - self.dest = dest - self.module = module - self.cmd_path = self.module.get_bin_path('tar') + super(TarFile, self).__init__(src, dest, module) self.zipflag = 'j' # class to handle xz compressed tar files class TarXz(TgzFile): def __init__(self, src, dest, module): - self.src = src - self.dest = dest - self.module = module - self.cmd_path = self.module.get_bin_path('tar') + super(TarFile, self).__init__(src, dest, module) self.zipflag = 'J' @@ -193,6 +220,7 @@ def main(): src = 
os.path.expanduser(module.params['src']) dest = os.path.expanduser(module.params['dest']) copy = module.params['copy'] + file_args = module.load_file_common_arguments(module.params) # did tar file arrive? if not os.path.exists(src): @@ -217,20 +245,25 @@ def main(): res_args['check_results'] = handler.is_unarchived() if res_args['check_results']['unarchived']: res_args['changed'] = False - module.exit_json(**res_args) - - # do the unpack - try: - res_args['extract_results'] = handler.unarchive() - if res_args['extract_results']['rc'] != 0: - module.fail_json(msg="failed to unpack %s to %s" % (src, dest), **res_args) - except IOError: - module.fail_json(msg="failed to unpack %s to %s" % (src, dest)) + else: + # do the unpack + try: + res_args['extract_results'] = handler.unarchive() + if res_args['extract_results']['rc'] != 0: + module.fail_json(msg="failed to unpack %s to %s" % (src, dest), **res_args) + except IOError: + module.fail_json(msg="failed to unpack %s to %s" % (src, dest)) + else: + res_args['changed'] = True - res_args['changed'] = True + # do we need to change perms? 
+ for filename in handler.files_in_archive: + file_args['path'] = os.path.join(dest, filename) + res_args['changed'] = module.set_fs_attributes_if_different(file_args, res_args['changed']) module.exit_json(**res_args) # import module snippets from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() From e2dcb5fc9bd701fd89931ace29e99a198140c300 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 14 Jan 2015 17:22:05 -0500 Subject: [PATCH 084/236] now captures cache fetch failures --- packaging/os/apt.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/packaging/os/apt.py b/packaging/os/apt.py index ad1807f9c20..9f5b8fd4cda 100644 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -619,6 +619,8 @@ def main(): except apt.cache.LockFailedException: module.fail_json(msg="Failed to lock apt for exclusive operation") + except apt.cache.FetchFailedException: + module.fail_json(msg="Could not fetch updated apt files") # import module snippets from ansible.module_utils.basic import * From e3759bd0d396d63b869732a63ecae5ca7a2a9641 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 14 Jan 2015 19:12:35 -0800 Subject: [PATCH 085/236] Add detection of uid,gid,mode changes when deciding whether an archive needs to be unarchived again. 
--- files/unarchive.py | 31 +++++++++++++++++++++++++++---- 1 file changed, 27 insertions(+), 4 deletions(-) diff --git a/files/unarchive.py b/files/unarchive.py index c567cfc3d8a..db9defb37c4 100644 --- a/files/unarchive.py +++ b/files/unarchive.py @@ -104,7 +104,7 @@ class ZipFile(object): return self._files_in_archive - def is_unarchived(self): + def is_unarchived(self, mode, owner, group): return dict(unarchived=False) def unarchive(self): @@ -148,10 +148,32 @@ class TgzFile(object): self._files_in_archive.append(filename) return self._files_in_archive - def is_unarchived(self): - cmd = '%s -v -C "%s" --diff -%sf "%s"' % (self.cmd_path, self.dest, self.zipflag, self.src) + def is_unarchived(self, mode, owner, group): + cmd = '%s -C "%s" --diff -%sf "%s"' % (self.cmd_path, self.dest, self.zipflag, self.src) rc, out, err = self.module.run_command(cmd) unarchived = (rc == 0) + if not unarchived: + # Check whether the differences are in something that we're + # setting anyway + + # What will be set + to_be_set = set() + for perm in (('Mode', mode), ('Gid', group), ('Uid', owner)): + if perm[1] is not None: + to_be_set.add(perm[0]) + + # What is different + changes = set() + difference_re = re.compile(r': (.*) differs$') + for line in out.splitlines(): + match = difference_re.search(line) + if not match: + # Unknown tar output. Assume we have changes + return dict(unarchived=unarchived, rc=rc, out=out, err=err, cmd=cmd) + changes.add(match.groups()[0]) + + if changes and changes.issubset(to_be_set): + unarchived = True return dict(unarchived=unarchived, rc=rc, out=out, err=err, cmd=cmd) def unarchive(self): @@ -242,7 +264,8 @@ def main(): res_args = dict(handler=handler.__class__.__name__, dest=dest, src=src) # do we need to do unpack? 
- res_args['check_results'] = handler.is_unarchived() + res_args['check_results'] = handler.is_unarchived(file_args['mode'], + file_args['owner'], file_args['group']) if res_args['check_results']['unarchived']: res_args['changed'] = False else: From f4a709ad7c122f42b3edd74482dd0d08afc7a95b Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 15 Jan 2015 15:32:19 -0500 Subject: [PATCH 086/236] simplified update-rc.d enable/disable handling, removed forced defaults creation as that breaks local customizations --- system/service.py | 49 ++++++++++++++++------------------------------- 1 file changed, 17 insertions(+), 32 deletions(-) diff --git a/system/service.py b/system/service.py index 275bac900a9..5da1796416d 100644 --- a/system/service.py +++ b/system/service.py @@ -105,6 +105,7 @@ import shlex import select import time import string +import glob from distutils.version import LooseVersion @@ -734,44 +735,28 @@ class LinuxService(Service): # update-rc.d style # if self.enable_cmd.endswith("update-rc.d"): - if self.enable: - action = 'enable' - else: - action = 'disable' - if self.enable: - # make sure the init.d symlinks are created - # otherwise enable might not work - (rc, out, err) = self.execute_command("%s %s defaults" \ - % (self.enable_cmd, self.name)) + enabled = False + links = glob.glob('/etc/rc?.d/S??' 
+ self.name) + if links: + enabled = True + + if self.enable != enabled: + self.changed = True + + if self.enable: + action = 'enable' + else: + action = 'disable' + + (rc, out, err) = self.execute_command("%s %s %s" % (self.enable_cmd, self.name, action)) if rc != 0: if err: self.module.fail_json(msg=err) else: - self.module.fail_json(msg=out) + self.module.fail_json(msg=out) % (self.enable_cmd, self.name, action) - (rc, out, err) = self.execute_command("%s -n %s %s" \ - % (self.enable_cmd, self.name, action)) - self.changed = False - for line in out.splitlines(): - if line.startswith('rename'): - self.changed = True - break - elif self.enable and 'do not exist' in line: - self.changed = True - break - elif not self.enable and 'already exist' in line: - self.changed = True - break - - # Debian compatibility - for line in err.splitlines(): - if self.enable and 'no runlevel symlinks to modify' in line: - self.changed = True - break - - if not self.changed: - return + return # # If we've gotten to the end, the service needs to be updated From 9ac5ed26213b5bdb821433096bc9d566eaea51c5 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 15 Jan 2015 15:53:39 -0500 Subject: [PATCH 087/236] now allows for case in which someone removed the K* links and is trying to reenable a service --- system/service.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/system/service.py b/system/service.py index 5da1796416d..4b82d6ecc6a 100644 --- a/system/service.py +++ b/system/service.py @@ -737,8 +737,8 @@ class LinuxService(Service): if self.enable_cmd.endswith("update-rc.d"): enabled = False - links = glob.glob('/etc/rc?.d/S??' + self.name) - if links: + slinks = glob.glob('/etc/rc?.d/S??' + self.name) + if slinks: enabled = True if self.enable != enabled: @@ -746,6 +746,14 @@ class LinuxService(Service): if self.enable: action = 'enable' + klinks = glob.glob('/etc/rc?.d/K??' 
+ self.name) + if not klinks: + (rc, out, err) = self.execute_command("%s %s defaults" % (self.enable_cmd, self.name)) + if rc != 0: + if err: + self.module.fail_json(msg=err) + else: + self.module.fail_json(msg=out) % (self.enable_cmd, self.name, action) else: action = 'disable' From 8f43a0a4fe641b36e31e63418a3263e60b695d8e Mon Sep 17 00:00:00 2001 From: Tim G Date: Fri, 16 Jan 2015 10:06:36 +1000 Subject: [PATCH 088/236] Load distutils on all platforms EXCEPT Solaris. Solaris doesn't ship distutils with the default Python package. This patch fixes "service" on Solaris since 30d6713. --- system/service.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/system/service.py b/system/service.py index fea5722710e..362359dd553 100644 --- a/system/service.py +++ b/system/service.py @@ -106,7 +106,8 @@ import select import time import string -if platform.system() == 'Linux': +# The distutils module is not shipped with SUNWPython on Solaris. +if platform.system() != 'SunOS': from distutils.version import LooseVersion class Service(object): From b2fd4d18e000c39cbae63e4df09fce2635e9bebe Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Thu, 15 Jan 2015 19:13:32 -0600 Subject: [PATCH 089/236] It is not correct that you need at least 1 primary node --- cloud/rackspace/rax_clb_nodes.py | 24 +----------------------- 1 file changed, 1 insertion(+), 23 deletions(-) diff --git a/cloud/rackspace/rax_clb_nodes.py b/cloud/rackspace/rax_clb_nodes.py index 24325b44597..472fad19b1c 100644 --- a/cloud/rackspace/rax_clb_nodes.py +++ b/cloud/rackspace/rax_clb_nodes.py @@ -150,21 +150,6 @@ def _get_node(lb, node_id=None, address=None, port=None): return None -def _is_primary(node): - """Return True if node is primary and enabled""" - return (node.type.lower() == 'primary' and - node.condition.lower() == 'enabled') - - -def _get_primary_nodes(lb): - """Return a list of primary and enabled nodes""" - nodes = [] - for node in lb.nodes: - if _is_primary(node): - 
nodes.append(node) - return nodes - - def main(): argument_spec = rax_argument_spec() argument_spec.update( @@ -230,13 +215,6 @@ def main(): if state == 'absent': if not node: # Removing a non-existent node module.exit_json(changed=False, state=state) - - # The API detects this as well but currently pyrax does not return a - # meaningful error message - if _is_primary(node) and len(_get_primary_nodes(lb)) == 1: - module.fail_json( - msg='At least one primary node has to be enabled') - try: lb.delete_node(node) result = {} @@ -299,5 +277,5 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.rax import * -### invoke the module +# invoke the module main() From d2829c2510373a52230ab5d034e60f173d2e5e09 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Thu, 15 Jan 2015 19:27:34 -0600 Subject: [PATCH 090/236] Just use built in required functionality for arguments --- cloud/rackspace/rax_clb.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/cloud/rackspace/rax_clb.py b/cloud/rackspace/rax_clb.py index 7a2699709da..38baa77b6ff 100644 --- a/cloud/rackspace/rax_clb.py +++ b/cloud/rackspace/rax_clb.py @@ -140,10 +140,6 @@ except ImportError: def cloud_load_balancer(module, state, name, meta, algorithm, port, protocol, vip_type, timeout, wait, wait_timeout, vip_id): - for arg in (state, name, port, protocol, vip_type): - if not arg: - module.fail_json(msg='%s is required for rax_clb' % arg) - if int(timeout) < 30: module.fail_json(msg='"timeout" must be greater than or equal to 30') @@ -257,7 +253,7 @@ def main(): algorithm=dict(choices=CLB_ALGORITHMS, default='LEAST_CONNECTIONS'), meta=dict(type='dict', default={}), - name=dict(), + name=dict(required=True), port=dict(type='int', default=80), protocol=dict(choices=CLB_PROTOCOLS, default='HTTP'), state=dict(default='present', choices=['present', 'absent']), From 6ca357198e464d079294b600befc04da36aacef6 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Fri, 16 Jan 2015 15:33:41 
-0600 Subject: [PATCH 091/236] Clean up some required argument logic --- cloud/rackspace/rax_keypair.py | 4 ++-- cloud/rackspace/rax_network.py | 9 ++++----- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/cloud/rackspace/rax_keypair.py b/cloud/rackspace/rax_keypair.py index 591ad8c3597..8f38abc12e0 100644 --- a/cloud/rackspace/rax_keypair.py +++ b/cloud/rackspace/rax_keypair.py @@ -104,7 +104,7 @@ def rax_keypair(module, name, public_key, state): keypair = {} if state == 'present': - if os.path.isfile(public_key): + if public_key and os.path.isfile(public_key): try: f = open(public_key) public_key = f.read() @@ -143,7 +143,7 @@ def main(): argument_spec = rax_argument_spec() argument_spec.update( dict( - name=dict(), + name=dict(required=True), public_key=dict(), state=dict(default='present', choices=['absent', 'present']), ) diff --git a/cloud/rackspace/rax_network.py b/cloud/rackspace/rax_network.py index bc4745a7a84..bd23f5f878d 100644 --- a/cloud/rackspace/rax_network.py +++ b/cloud/rackspace/rax_network.py @@ -65,10 +65,6 @@ except ImportError: def cloud_network(module, state, label, cidr): - for arg in (state, label, cidr): - if not arg: - module.fail_json(msg='%s is required for cloud_networks' % arg) - changed = False network = None networks = [] @@ -79,6 +75,9 @@ def cloud_network(module, state, label, cidr): 'incorrectly capitalized region name.') if state == 'present': + if not cidr: + module.fail_json(msg='missing required arguments: cidr') + try: network = pyrax.cloud_networks.find_network_by_label(label) except pyrax.exceptions.NetworkNotFound: @@ -115,7 +114,7 @@ def main(): dict( state=dict(default='present', choices=['present', 'absent']), - label=dict(), + label=dict(required=True), cidr=dict() ) ) From e8c328773a1daa5ffa715d2da05003a47bae6bda Mon Sep 17 00:00:00 2001 From: Patrik Lundin Date: Sat, 17 Jan 2015 12:04:41 +0100 Subject: [PATCH 092/236] Update OpenBSD rcctl handling to use new syntax. 
See http://marc.info/?l=openbsd-cvs&m=142054488027109&w=2 --- system/service.py | 69 ++++++++++++++++++++++++++++++++++------------- 1 file changed, 51 insertions(+), 18 deletions(-) diff --git a/system/service.py b/system/service.py index c9ce55d1a37..a1cfde77118 100644 --- a/system/service.py +++ b/system/service.py @@ -991,33 +991,33 @@ class OpenBsdService(Service): if not self.enable_cmd: return super(OpenBsdService, self).service_enable() - rc, stdout, stderr = self.execute_command("%s %s %s" % (self.enable_cmd, 'default', self.name)) + rc, stdout, stderr = self.execute_command("%s %s %s %s" % (self.enable_cmd, 'getdef', self.name, 'flags')) if stderr: self.module.fail_json(msg=stderr) - default_string = stdout.rstrip() + getdef_string = stdout.rstrip() # Depending on the service the string returned from 'default' may be # either a set of flags or the boolean YES/NO - if default_string == "YES" or default_string == "NO": + if getdef_string == "YES" or getdef_string == "NO": default_flags = '' else: - default_flags = default_string + default_flags = getdef_string - rc, stdout, stderr = self.execute_command("%s %s %s" % (self.enable_cmd, 'status', self.name)) + rc, stdout, stderr = self.execute_command("%s %s %s %s" % (self.enable_cmd, 'get', self.name, 'flags')) if stderr: self.module.fail_json(msg=stderr) - status_string = stdout.rstrip() + get_string = stdout.rstrip() - # Depending on the service the string returned from 'status' may be + # Depending on the service the string returned from 'getdef/get' may be # either a set of flags or the boolean YES/NO - if status_string == "YES" or status_string == "NO": + if get_string == "YES" or get_string == "NO": current_flags = '' else: - current_flags = status_string + current_flags = get_string # If there are arguments from the user we use these as flags unless # they are already set. 
@@ -1031,29 +1031,62 @@ class OpenBsdService(Service): else: changed_flags = '' + rc, stdout, stderr = self.execute_command("%s %s %s %s" % (self.enable_cmd, 'get', self.name, 'status')) + if self.enable: if rc == 0 and not changed_flags: return - action = "enable %s" % (self.name) + if rc != 0: + status_action = "set %s status on" % (self.name) + else: + status_action = '' if changed_flags: - action = action + " flags %s" % (changed_flags) + flags_action = "set %s flags %s" % (self.name, changed_flags) + else: + flags_action = '' else: if rc == 1: return - action = "disable %s" % self.name + status_action = "set %s status off" % self.name + flags_action = '' + + # Verify state assumption + if not status_action and not flags_action: + self.module.fail_json(msg="neither status_action or status_flags is set, this should never happen") if self.module.check_mode: self.module.exit_json(changed=True, msg="changing service enablement") - rc, stdout, stderr = self.execute_command("%s %s" % (self.enable_cmd, action)) + status_modified = 0 + if status_action: + rc, stdout, stderr = self.execute_command("%s %s" % (self.enable_cmd, status_action)) - if rc != 0: - if stderr: - self.module.fail_json(msg=stderr) - else: - self.module.fail_json(msg="rcctl failed to modify service enablement") + if rc != 0: + if stderr: + self.module.fail_json(msg=stderr) + else: + self.module.fail_json(msg="rcctl failed to modify service status") + + status_modified = 1 + + if flags_action: + rc, stdout, stderr = self.execute_command("%s %s" % (self.enable_cmd, flags_action)) + + if rc != 0: + if stderr: + if status_modified: + error_message = "rcctl modified service status but failed to set flags: " + stderr + else: + error_message = stderr + else: + if status_modified: + error_message = "rcctl modified service status but failed to set flags" + else: + error_message = "rcctl failed to modify service flags" + + self.module.fail_json(msg=error_message) self.changed = True From 
0794597c7e3ed584a8d3d238393fece94197f32b Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 19 Jan 2015 09:32:50 -0500 Subject: [PATCH 093/236] corrected release when this feature was added --- cloud/amazon/ec2_vol.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_vol.py b/cloud/amazon/ec2_vol.py index 050863c22fb..7fd58fa5348 100644 --- a/cloud/amazon/ec2_vol.py +++ b/cloud/amazon/ec2_vol.py @@ -55,7 +55,7 @@ options: required: false default: standard aliases: [] - version_added: "1.8" + version_added: "1.9" iops: description: - the provisioned IOPs you want to associate with this volume (integer). From d8032ecc8171d24f8034e1f6e2d8976ca4d6c134 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Mon, 19 Jan 2015 10:54:22 -0600 Subject: [PATCH 094/236] Use rax_to_dict and make sure to return the volume details after deletion --- cloud/rackspace/rax_cbs.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/cloud/rackspace/rax_cbs.py b/cloud/rackspace/rax_cbs.py index a1b6ce46a6e..b72d757c71f 100644 --- a/cloud/rackspace/rax_cbs.py +++ b/cloud/rackspace/rax_cbs.py @@ -145,10 +145,7 @@ def cloud_block_storage(module, state, name, description, meta, size, attempts=attempts) volume.get() - for key, value in vars(volume).iteritems(): - if (isinstance(value, NON_CALLABLES) and - not key.startswith('_')): - instance[key] = value + instance = rax_to_dict(volume) result = dict(changed=changed, volume=instance) @@ -164,6 +161,7 @@ def cloud_block_storage(module, state, name, description, meta, size, elif state == 'absent': if volume: + instance = rax_to_dict(volume) try: volume.delete() changed = True From f3d8d0f83f5ae13b8d58d758daffb38db4a4c81a Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Mon, 19 Jan 2015 10:58:22 -0600 Subject: [PATCH 095/236] Remove some broken and unnecessary required args logic --- cloud/rackspace/rax_cbs.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/cloud/rackspace/rax_cbs.py 
b/cloud/rackspace/rax_cbs.py index b72d757c71f..261168889cc 100644 --- a/cloud/rackspace/rax_cbs.py +++ b/cloud/rackspace/rax_cbs.py @@ -108,10 +108,6 @@ except ImportError: def cloud_block_storage(module, state, name, description, meta, size, snapshot_id, volume_type, wait, wait_timeout): - for arg in (state, name, size, volume_type): - if not arg: - module.fail_json(msg='%s is required for rax_cbs' % arg) - if size < 100: module.fail_json(msg='"size" must be greater than or equal to 100') From 32ef72df2eafafb6e8b9c5523813717aace86931 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Mon, 19 Jan 2015 12:43:30 -0600 Subject: [PATCH 096/236] Small fix-ups to convert objects to dicts, update volume details at the appropriate time, and remove unnecessary required argument logic --- cloud/rackspace/rax_cbs_attachments.py | 17 +++++------------ 1 file changed, 5 insertions(+), 12 deletions(-) diff --git a/cloud/rackspace/rax_cbs_attachments.py b/cloud/rackspace/rax_cbs_attachments.py index 365f93cd6e2..870b8e611df 100644 --- a/cloud/rackspace/rax_cbs_attachments.py +++ b/cloud/rackspace/rax_cbs_attachments.py @@ -90,11 +90,6 @@ except ImportError: def cloud_block_storage_attachments(module, state, volume, server, device, wait, wait_timeout): - for arg in (state, volume, server, device): - if not arg: - module.fail_json(msg='%s is required for rax_cbs_attachments' % - arg) - cbs = pyrax.cloud_blockstorage cs = pyrax.cloudservers @@ -133,7 +128,7 @@ def cloud_block_storage_attachments(module, state, volume, server, device, not key.startswith('_')): instance[key] = value - result = dict(changed=changed, volume=instance) + result = dict(changed=changed) if volume.status == 'error': result['msg'] = '%s failed to build' % volume.id @@ -142,6 +137,9 @@ def cloud_block_storage_attachments(module, state, volume, server, device, pyrax.utils.wait_until(volume, 'status', 'in-use', interval=5, attempts=attempts) + volume.get() + result['volume'] = rax_to_dict(volume) + if 'msg' in 
result: module.fail_json(**result) else: @@ -167,12 +165,7 @@ def cloud_block_storage_attachments(module, state, volume, server, device, elif volume.attachments: module.fail_json(msg='Volume is attached to another server') - for key, value in vars(volume).iteritems(): - if (isinstance(value, NON_CALLABLES) and - not key.startswith('_')): - instance[key] = value - - result = dict(changed=changed, volume=instance) + result = dict(changed=changed, volume=rax_to_dict(volume)) if volume.status == 'error': result['msg'] = '%s failed to build' % volume.id From c526a695de9a2d5f1c87567f2898cc72d595b38c Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Mon, 19 Jan 2015 13:08:56 -0600 Subject: [PATCH 097/236] Remove unnecessary required arg logic, and remove 'absent' as a valid choice since it isn't implemented --- cloud/rackspace/rax_identity.py | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/cloud/rackspace/rax_identity.py b/cloud/rackspace/rax_identity.py index ea40ea2ef46..47b4cb60cf0 100644 --- a/cloud/rackspace/rax_identity.py +++ b/cloud/rackspace/rax_identity.py @@ -55,10 +55,6 @@ except ImportError: def cloud_identity(module, state, identity): - for arg in (state, identity): - if not arg: - module.fail_json(msg='%s is required for rax_identity' % arg) - instance = dict( authenticated=identity.authenticated, credentials=identity._creds_file @@ -79,7 +75,7 @@ def main(): argument_spec = rax_argument_spec() argument_spec.update( dict( - state=dict(default='present', choices=['present', 'absent']) + state=dict(default='present', choices=['present']) ) ) @@ -95,7 +91,7 @@ def main(): setup_rax_module(module, pyrax) - if pyrax.identity is None: + if not pyrax.identity: module.fail_json(msg='Failed to instantiate client. 
This ' 'typically indicates an invalid region or an ' 'incorrectly capitalized region name.') @@ -106,5 +102,5 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.rax import * -### invoke the module +# invoke the module main() From df2088377bc09f899a0e85bb7b6ff278e661c005 Mon Sep 17 00:00:00 2001 From: Alex Clifford Date: Tue, 20 Jan 2015 10:43:22 +1100 Subject: [PATCH 098/236] ttl should always be used during a delete --- cloud/amazon/route53.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cloud/amazon/route53.py b/cloud/amazon/route53.py index 6fb44fcbf0f..8938a728700 100644 --- a/cloud/amazon/route53.py +++ b/cloud/amazon/route53.py @@ -113,6 +113,7 @@ EXAMPLES = ''' command: delete zone: foo.com record: "{{ rec.set.record }}" + ttl: "{{ rec.set.ttl }}" type: "{{ rec.set.type }}" value: "{{ rec.set.value }}" From f85b7ee13c60642ec33ec8790d91e455387e1306 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 19 Jan 2015 19:37:57 -0500 Subject: [PATCH 099/236] now handles non string values for sysctl --- system/sysctl.py | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/system/sysctl.py b/system/sysctl.py index 3cf29f9a32b..4517c724ca9 100644 --- a/system/sysctl.py +++ b/system/sysctl.py @@ -185,12 +185,20 @@ class SysctlModule(object): def _parse_value(self, value): if value is None: return '' - elif value.lower() in BOOLEANS_TRUE: - return '1' - elif value.lower() in BOOLEANS_FALSE: - return '0' + elif isinstance(value, bool): + if value: + return '1' + else: + return '0' + elif isinstance(value, basestring): + if value.lower() in BOOLEANS_TRUE: + return '1' + elif value.lower() in BOOLEANS_FALSE: + return '0' + else: + return value.strip() else: - return value.strip() + return value # ============================================================== # SYSCTL COMMAND MANAGEMENT From d4074ccd96ad0f587abe792373b6bff7016ba8c8 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Tue, 20 Jan 2015 14:07:35 
-0600 Subject: [PATCH 100/236] Remove unnecessary logic, return the database on delete --- cloud/rackspace/rax_cdb_database.py | 15 ++------------- 1 file changed, 2 insertions(+), 13 deletions(-) diff --git a/cloud/rackspace/rax_cdb_database.py b/cloud/rackspace/rax_cdb_database.py index 421b6dcb094..cc7885ee31e 100644 --- a/cloud/rackspace/rax_cdb_database.py +++ b/cloud/rackspace/rax_cdb_database.py @@ -79,12 +79,6 @@ def find_database(instance, name): def save_database(module, cdb_id, name, character_set, collate): - - for arg, value in dict(cdb_id=cdb_id, name=name).iteritems(): - if not value: - module.fail_json(msg='%s is required for the "rax_cdb_database" ' - 'module' % arg) - cdb = pyrax.cloud_databases try: @@ -111,12 +105,6 @@ def save_database(module, cdb_id, name, character_set, collate): def delete_database(module, cdb_id, name): - - for arg, value in dict(cdb_id=cdb_id, name=name).iteritems(): - if not value: - module.fail_json(msg='%s is required for the "rax_cdb_database" ' - 'module' % arg) - cdb = pyrax.cloud_databases try: @@ -136,7 +124,8 @@ def delete_database(module, cdb_id, name): else: changed = True - module.exit_json(changed=changed, action='delete') + module.exit_json(changed=changed, action='delete', + database=rax_to_dict(database)) def rax_cdb_database(module, state, cdb_id, name, character_set, collate): From 347234b937d9054a35dc67bc7bffd96781da7dac Mon Sep 17 00:00:00 2001 From: Ben Konrath Date: Tue, 20 Jan 2015 21:50:15 +0100 Subject: [PATCH 101/236] Add support for SL7 to hostname module. 
--- system/hostname.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/system/hostname.py b/system/hostname.py index 2ca7479829b..f645a8cdfd3 100644 --- a/system/hostname.py +++ b/system/hostname.py @@ -380,12 +380,20 @@ class CentOSLinuxHostname(Hostname): class ScientificHostname(Hostname): platform = 'Linux' distribution = 'Scientific' - strategy_class = RedHatStrategy + distribution_version = get_distribution_version() + if distribution_version and LooseVersion(distribution_version) >= LooseVersion("7"): + strategy_class = SystemdStrategy + else: + strategy_class = RedHatStrategy class ScientificLinuxHostname(Hostname): platform = 'Linux' distribution = 'Scientific linux' - strategy_class = RedHatStrategy + distribution_version = get_distribution_version() + if distribution_version and LooseVersion(distribution_version) >= LooseVersion("7"): + strategy_class = SystemdStrategy + else: + strategy_class = RedHatStrategy class AmazonLinuxHostname(Hostname): platform = 'Linux' From 13685fb91b037d092e8e7b36f7ee8ca74a988a5d Mon Sep 17 00:00:00 2001 From: Marek Chodor Date: Wed, 21 Jan 2015 13:37:24 +0100 Subject: [PATCH 102/236] Fixes docker.errors.DockerException: 'dns' parameter has no effect on create_container(). 
It has been moved to start() --- cloud/docker/docker.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index f71bad42e79..fd1ba63c577 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -726,13 +726,9 @@ class DockerManager(object): 'name': self.module.params.get('name'), 'stdin_open': self.module.params.get('stdin_open'), 'tty': self.module.params.get('tty'), - 'dns': self.module.params.get('dns'), 'volumes_from': self.module.params.get('volumes_from'), } - if params['dns'] is not None: - self.ensure_capability('dns') - if params['volumes_from'] is not None: self.ensure_capability('volumes_from') From 95d4b796c0a7eb6827d49d756467fcc4b4f21c5e Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 21 Jan 2015 17:27:40 -0500 Subject: [PATCH 103/236] updated tenancy version added --- cloud/amazon/ec2.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index 84fa572adab..29c142514c8 100644 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -62,7 +62,7 @@ options: default: null aliases: [] tenancy: - version_added: "1.8" + version_added: "1.9" description: - An instance with a tenancy of "dedicated" runs on single-tenant hardware and can only be launched into a VPC. Valid values are:"default" or "dedicated". NOTE: To use dedicated tenancy you MUST specify a vpc_subnet_id as well. Dedicated tenancy is not available for EC2 "micro" instances. 
required: false From 10aaa1137c410108f38d6314fe97e6f5578881bb Mon Sep 17 00:00:00 2001 From: Doug Ellwanger Date: Mon, 29 Sep 2014 13:02:48 -0700 Subject: [PATCH 104/236] Pull SSL certificate IDs from existing ELBs --- cloud/amazon/ec2_elb_lb.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/cloud/amazon/ec2_elb_lb.py b/cloud/amazon/ec2_elb_lb.py index d83db113963..51e135dc7ec 100644 --- a/cloud/amazon/ec2_elb_lb.py +++ b/cloud/amazon/ec2_elb_lb.py @@ -341,7 +341,7 @@ class ElbManager(object): } if check_elb.listeners: - info['listeners'] = [l.get_complex_tuple() + info['listeners'] = [self._api_listener_as_tuple(l) for l in check_elb.listeners] elif self.status == 'created': # When creating a new ELB, listeners don't show in the @@ -431,7 +431,7 @@ class ElbManager(object): # Since ELB allows only one listener on each incoming port, a # single match on the incomping port is all we're looking for if existing_listener[0] == listener['load_balancer_port']: - existing_listener_found = existing_listener.get_complex_tuple() + existing_listener_found = self._api_listener_as_tuple(existing_listener) break if existing_listener_found: @@ -451,7 +451,7 @@ class ElbManager(object): # Check for any extraneous listeners we need to remove, if desired if self.purge_listeners: for existing_listener in self.elb.listeners: - existing_listener_tuple = existing_listener.get_complex_tuple() + existing_listener_tuple = self._api_listener_as_tuple(existing_listener) if existing_listener_tuple in listeners_to_remove: # Already queued for removal continue @@ -468,6 +468,13 @@ class ElbManager(object): if listeners_to_add: self._create_elb_listeners(listeners_to_add) + def _api_listener_as_tuple(self, listener): + """Adds ssl_certificate_id to ELB API tuple if present""" + base_tuple = listener.get_complex_tuple() + if listener.ssl_certificate_id and len(base_tuple) < 5: + return base_tuple + (listener.ssl_certificate_id,) + return base_tuple + def 
_listener_as_tuple(self, listener): """Formats listener as a 4- or 5-tuples, in the order specified by the ELB API""" From 0162fc525dc5f8cd67caf663086160efa27707d8 Mon Sep 17 00:00:00 2001 From: Ryan Rawson Date: Tue, 20 Jan 2015 16:32:36 -0800 Subject: [PATCH 105/236] Fixes #581 - digitalocean module cannot create private_networking=true droplets --- cloud/digital_ocean/digital_ocean.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cloud/digital_ocean/digital_ocean.py b/cloud/digital_ocean/digital_ocean.py index efebf5f1bcf..7e0a432c8dc 100644 --- a/cloud/digital_ocean/digital_ocean.py +++ b/cloud/digital_ocean/digital_ocean.py @@ -236,7 +236,8 @@ class Droplet(JsonfyMixIn): @classmethod def add(cls, name, size_id, image_id, region_id, ssh_key_ids=None, virtio=True, private_networking=False, backups_enabled=False): - json = cls.manager.new_droplet(name, size_id, image_id, region_id, ssh_key_ids, virtio, private_networking, backups_enabled) + private_networking_lower = str(private_networking).lower() + json = cls.manager.new_droplet(name, size_id, image_id, region_id, ssh_key_ids, virtio, private_networking_lower, backups_enabled) droplet = cls(json) return droplet From 9f333afb6abe24630ba5b6cd2745d3dcc269d712 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 21 Jan 2015 16:14:11 -0800 Subject: [PATCH 106/236] Standardize class names on Archive suffix. 
This also removes the collision between the stdlib ZipFile class and the module's ZipFile class Fixes #681 --- files/unarchive.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/files/unarchive.py b/files/unarchive.py index db9defb37c4..fc2db0e6907 100644 --- a/files/unarchive.py +++ b/files/unarchive.py @@ -82,7 +82,7 @@ class UnarchiveError(Exception): pass # class to handle .zip files -class ZipFile(object): +class ZipArchive(object): def __init__(self, src, dest, module): self.src = src @@ -123,7 +123,7 @@ class ZipFile(object): # class to handle gzipped tar files -class TgzFile(object): +class TgzArchive(object): def __init__(self, src, dest, module): self.src = src @@ -196,29 +196,29 @@ class TgzFile(object): # class to handle tar files that aren't compressed -class TarFile(TgzFile): +class TarArchive(TgzArchive): def __init__(self, src, dest, module): - super(TarFile, self).__init__(src, dest, module) + super(TarArchive, self).__init__(src, dest, module) self.zipflag = '' # class to handle bzip2 compressed tar files -class TarBzip(TgzFile): +class TarBzipArchive(TgzArchive): def __init__(self, src, dest, module): - super(TarFile, self).__init__(src, dest, module) + super(TarBzipArchive, self).__init__(src, dest, module) self.zipflag = 'j' # class to handle xz compressed tar files -class TarXz(TgzFile): +class TarXzArchive(TgzArchive): def __init__(self, src, dest, module): - super(TarFile, self).__init__(src, dest, module) + super(TarXzArchive, self).__init__(src, dest, module) self.zipflag = 'J' # try handlers in order and return the one that works or bail if none work def pick_handler(src, dest, module): - handlers = [TgzFile, ZipFile, TarFile, TarBzip, TarXz] + handlers = [TgzArchive, ZipArchive, TarArchive, TarBzipArchive, TarXzArchive] for handler in handlers: obj = handler(src, dest, module) if obj.can_handle_archive(): From d66c3fcf5106fb57f606cea92afc71318c7daff0 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: 
Wed, 21 Jan 2015 19:50:34 -0500 Subject: [PATCH 107/236] moved defaulting to module constant to after when it is defined --- cloud/docker/docker.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index f71bad42e79..b9c379eed4a 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -524,6 +524,8 @@ class DockerManager(object): # connect to docker server docker_url = urlparse(module.params.get('docker_url')) docker_api_version = module.params.get('docker_api_version') + if not docker_api_version: + docker_api_version=docker.client.DEFAULT_DOCKER_API_VERSION self.client = docker.Client(base_url=docker_url.geturl(), version=docker_api_version) self.docker_py_versioninfo = get_docker_py_versioninfo() @@ -845,7 +847,7 @@ def main(): memory_limit = dict(default=0), memory_swap = dict(default=0), docker_url = dict(default='unix://var/run/docker.sock'), - docker_api_version = dict(default=docker.client.DEFAULT_DOCKER_API_VERSION), + docker_api_version = dict(), username = dict(default=None), password = dict(), email = dict(), From 9e1847ed033a8b283940fbb16660ffe06b14316d Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 21 Jan 2015 20:09:46 -0500 Subject: [PATCH 108/236] ec2 fix docs format --- cloud/amazon/ec2.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index 29c142514c8..93b496cb5e8 100644 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -64,7 +64,7 @@ options: tenancy: version_added: "1.9" description: - - An instance with a tenancy of "dedicated" runs on single-tenant hardware and can only be launched into a VPC. Valid values are:"default" or "dedicated". NOTE: To use dedicated tenancy you MUST specify a vpc_subnet_id as well. Dedicated tenancy is not available for EC2 "micro" instances. + - An instance with a tenancy of "dedicated" runs on single-tenant hardware and can only be launched into a VPC. 
Valid values are "default" or "dedicated". Note that to use dedicated tenancy you MUST specify a vpc_subnet_id as well. Dedicated tenancy is not available for EC2 "micro" instances. required: false default: default aliases: [] From c3a0e8a7a4ae3efde08078df1c4bde3cd8abde77 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 22 Jan 2015 18:05:54 -0800 Subject: [PATCH 109/236] Don't fial if virtualenv is not installed and we do not need to initialize the virtualenv Fixes #688 --- packaging/language/pip.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/packaging/language/pip.py b/packaging/language/pip.py index 17f52c00398..97576a5258b 100644 --- a/packaging/language/pip.py +++ b/packaging/language/pip.py @@ -98,7 +98,7 @@ options: required: false default: null notes: - - Please note that virtualenv (U(http://www.virtualenv.org/)) must be installed on the remote host if the virtualenv parameter is specified. + - Please note that virtualenv (U(http://www.virtualenv.org/)) must be installed on the remote host if the virtualenv parameter is specified and the virtualenv needs to be initialized. 
requirements: [ "virtualenv", "pip" ] author: Matt Wright ''' @@ -252,12 +252,14 @@ def main(): if env: env = os.path.expanduser(env) - virtualenv = os.path.expanduser(virtualenv_command) - if os.path.basename(virtualenv) == virtualenv: - virtualenv = module.get_bin_path(virtualenv_command, True) if not os.path.exists(os.path.join(env, 'bin', 'activate')): if module.check_mode: module.exit_json(changed=True) + + virtualenv = os.path.expanduser(virtualenv_command) + if os.path.basename(virtualenv) == virtualenv: + virtualenv = module.get_bin_path(virtualenv_command, True) + if module.params['virtualenv_site_packages']: cmd = '%s --system-site-packages %s' % (virtualenv, env) else: @@ -278,7 +280,7 @@ def main(): pip = _get_pip(module, env, module.params['executable']) cmd = '%s %s' % (pip, state_map[state]) - + # If there's a virtualenv we want things we install to be able to use other # installations that exist as binaries within this virtualenv. Example: we # install cython and then gevent -- gevent needs to use the cython binary, @@ -308,7 +310,7 @@ def main(): cmd += ' %s' % _get_full_name(name, version) elif requirements: cmd += ' -r %s' % requirements - + this_dir = tempfile.gettempdir() if chdir: this_dir = os.path.join(this_dir, chdir) @@ -319,7 +321,7 @@ def main(): elif name.startswith('svn+') or name.startswith('git+') or \ name.startswith('hg+') or name.startswith('bzr+'): module.exit_json(changed=True) - + freeze_cmd = '%s freeze' % pip rc, out_pip, err_pip = module.run_command(freeze_cmd, cwd=this_dir) From 4f1f8e89c9c380c416badefb48154e40ade91afc Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 22 Jan 2015 21:55:27 -0800 Subject: [PATCH 110/236] More information about distutils on Solaris in case we run into this again --- system/service.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/system/service.py b/system/service.py index 108427bb954..321950cb73b 100644 --- a/system/service.py +++ b/system/service.py @@ -107,6 +107,9 @@ import 
time import string # The distutils module is not shipped with SUNWPython on Solaris. +# It's in the SUNWPython-devel package which also contains development files +# that don't belong on production boxes. Since our Solaris code doesn't +# depend on LooseVersion, do not import it on Solaris. if platform.system() != 'SunOS': from distutils.version import LooseVersion From d2c6791082b93890ce8ff83b5a2e811139228a99 Mon Sep 17 00:00:00 2001 From: calmera Date: Tue, 20 Jan 2015 08:27:52 +0100 Subject: [PATCH 111/236] Update docker.py Added some more documentation for the memory_limit and volume options. --- cloud/docker/docker.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index b9c379eed4a..00e5b40f80f 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -77,7 +77,7 @@ options: version_added: "1.5" volumes: description: - - Set volume(s) to mount on the container + - Set volume(s) to mount on the container separated with a comma (,) and in the format "source:dest[:rights]" required: false default: null aliases: [] @@ -96,11 +96,11 @@ options: version_added: "1.5" memory_limit: description: - - Set RAM allocated to container + - Set RAM allocated to container. It whould be passed as an amount of bytes. 
For example 1048576 = 1Gb required: false default: null aliases: [] - default: 256MB + default: 262144 docker_url: description: - URL of docker host to issue commands to From 670098af2d5b3351382f82848fcc8fdb5744c8f8 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 22 Jan 2015 23:05:35 -0800 Subject: [PATCH 112/236] Spelling and grammar fix --- cloud/docker/docker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 00e5b40f80f..1957c2d4db0 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -96,7 +96,7 @@ options: version_added: "1.5" memory_limit: description: - - Set RAM allocated to container. It whould be passed as an amount of bytes. For example 1048576 = 1Gb + - Set RAM allocated to container. It will be passed as a number of bytes. For example 1048576 = 1Gb required: false default: null aliases: [] From 80a5500a2315c97f35ebf7617ca37edf4bc85237 Mon Sep 17 00:00:00 2001 From: sysadmin75 Date: Fri, 23 Jan 2015 18:10:00 -0500 Subject: [PATCH 113/236] Fix #10059 - replace module does not obey follow=yes --- files/replace.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/files/replace.py b/files/replace.py index b7b75a9604d..588af02391e 100644 --- a/files/replace.py +++ b/files/replace.py @@ -152,6 +152,8 @@ def main(): if changed and not module.check_mode: if params['backup'] and os.path.exists(dest): module.backup_local(dest) + if params['follow'] and os.path.islink(dest): + dest = os.path.realpath(dest) write_changes(module, result[0], dest) msg, changed = check_file_attrs(module, changed, msg) From 01c4ff922f4128cc7f8acefd2590142d2162346f Mon Sep 17 00:00:00 2001 From: Joel Thompson Date: Sat, 24 Jan 2015 00:52:37 -0500 Subject: [PATCH 114/236] Fixes issue 697 -- only purge the grants that need to be purged --- cloud/amazon/ec2_group.py | 44 +++++++++++++++++++-------------------- 1 file changed, 21 insertions(+), 23 deletions(-) diff --git 
a/cloud/amazon/ec2_group.py b/cloud/amazon/ec2_group.py index 59623e96d64..b502bd1db53 100644 --- a/cloud/amazon/ec2_group.py +++ b/cloud/amazon/ec2_group.py @@ -128,7 +128,7 @@ def make_rule_key(prefix, rule, group_id, cidr_ip): def addRulesToLookup(rules, prefix, dict): for rule in rules: for grant in rule.grants: - dict[make_rule_key(prefix, rule, grant.group_id, grant.cidr_ip)] = rule + dict[make_rule_key(prefix, rule, grant.group_id, grant.cidr_ip)] = (rule, grant) def get_target_from_rule(module, ec2, rule, name, group, groups, vpc_id): @@ -304,14 +304,13 @@ def main(): # Finally, remove anything left in the groupRules -- these will be defunct rules if purge_rules: - for rule in groupRules.itervalues() : - for grant in rule.grants: - grantGroup = None - if grant.group_id: - grantGroup = groups[grant.group_id] - if not module.check_mode: - group.revoke(rule.ip_protocol, rule.from_port, rule.to_port, grant.cidr_ip, grantGroup) - changed = True + for (rule, grant) in groupRules.itervalues() : + grantGroup = None + if grant.group_id: + grantGroup = groups[grant.group_id] + if not module.check_mode: + group.revoke(rule.ip_protocol, rule.from_port, rule.to_port, grant.cidr_ip, grantGroup) + changed = True # Manage egress rules groupRules = {} @@ -369,20 +368,19 @@ def main(): # Finally, remove anything left in the groupRules -- these will be defunct rules if purge_rules_egress: - for rule in groupRules.itervalues(): - for grant in rule.grants: - grantGroup = None - if grant.group_id: - grantGroup = groups[grant.group_id].id - if not module.check_mode: - ec2.revoke_security_group_egress( - group_id=group.id, - ip_protocol=rule.ip_protocol, - from_port=rule.from_port, - to_port=rule.to_port, - src_group_id=grantGroup, - cidr_ip=grant.cidr_ip) - changed = True + for (rule, grant) in groupRules.itervalues(): + grantGroup = None + if grant.group_id: + grantGroup = groups[grant.group_id].id + if not module.check_mode: + ec2.revoke_security_group_egress( + 
group_id=group.id, + ip_protocol=rule.ip_protocol, + from_port=rule.from_port, + to_port=rule.to_port, + src_group_id=grantGroup, + cidr_ip=grant.cidr_ip) + changed = True if group: module.exit_json(changed=changed, group_id=group.id) From d257e2be8cd06d41be1c223c37bc5d8df93ac545 Mon Sep 17 00:00:00 2001 From: Conrado Buhrer Date: Mon, 26 Jan 2015 12:56:35 -0200 Subject: [PATCH 115/236] added: build-dep to apt module #349 Original code provided by @nathanhruby --- packaging/os/apt.py | 46 ++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 45 insertions(+), 1 deletion(-) diff --git a/packaging/os/apt.py b/packaging/os/apt.py index 9f5b8fd4cda..02576ef1a74 100644 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -82,6 +82,12 @@ options: required: false default: "yes" choices: [ "yes", "safe", "full", "dist"] + build_dep: + description: + - Instead, install the build dependencies for the named pkg (equivalent to 'apt-get build-dep foo') + required: false + default: "no" + choises: [ "yes", "no" ] dpkg_options: description: - Add dpkg options to apt command. 
Defaults to '-o "Dpkg::Options::=--force-confdef" -o "Dpkg::Options::=--force-confold"' @@ -133,6 +139,9 @@ EXAMPLES = ''' # Install a .deb package - apt: deb=/tmp/mypackage.deb + +# Install the build dependencies for package "foo" +- apt: pkg=foo build_dep=yes ''' @@ -484,6 +493,37 @@ def upgrade(m, mode="yes", force=False, default_release=None, m.exit_json(changed=False, msg=out, stdout=out, stderr=err) m.exit_json(changed=True, msg=out, stdout=out, stderr=err) +def build_dep(m, pkgspec, cache, force=False, + dpkg_options=expand_dpkg_options(DPKG_OPTIONS)): + if m.check_mode: + check_arg = '--simulate' + else: + check_arg = '' + + if force: + force_yes = '--force-yes' + else: + force_yes = '' + + packages = '' + pkgspec = expand_pkgspec_from_fnmatches(m, pkgspec, cache) + for package in pkgspec: + name, version = package_split(package) + packages += "'%s' " % package + if len(packages) == 0: + m.exit_json(changed=False) + else: + for (k,v) in APT_ENV_VARS.iteritems(): + os.environ[k] = v + apt_cmd_path = m.get_bin_path(APT_GET_CMD, required=True) + cmd = '%s -y %s %s %s build-dep %s' % (apt_cmd_path, dpkg_options, force_yes, check_arg, packages) + rc, out, err = m.run_command(cmd) + if rc: + m.fail_json(msg="'%s' failed: %s" % (cmd, err), stdout=out) + if APT_GET_ZERO in out: + m.exit_json(changed=False, msg=out, stdout=out, stderr=err) + m.exit_json(changed=True, msg=out, stdout=out, stderr=err) + def main(): module = AnsibleModule( argument_spec = dict( @@ -497,10 +537,11 @@ def main(): install_recommends = dict(default='yes', aliases=['install-recommends'], type='bool'), force = dict(default='no', type='bool'), upgrade = dict(choices=['yes', 'safe', 'full', 'dist']), + build_dep = dict(default='no', type='bool'), dpkg_options = dict(default=DPKG_OPTIONS) ), mutually_exclusive = [['package', 'upgrade', 'deb']], - required_one_of = [['package', 'upgrade', 'update_cache', 'deb']], + required_one_of = [['package', 'upgrade', 'update_cache', 'build-dep', 'deb']], 
supports_check_mode = True ) @@ -595,6 +636,9 @@ def main(): if latest and '=' in package: module.fail_json(msg='version number inconsistent with state=latest: %s' % package) + if p['build_dep']: + build_dep(module, packages, cache, force=force_yes, dpkg_options=dpkg_options) + if p['state'] == 'latest': result = install(module, packages, cache, upgrade=True, default_release=p['default_release'], From 34aa98a99cb5a0c0b966f54927cfc3a4cbe08bcc Mon Sep 17 00:00:00 2001 From: Johannes Steger Date: Mon, 26 Jan 2015 14:04:53 +0100 Subject: [PATCH 116/236] Fix function identifier quoting --- database/postgresql/postgresql_privs.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/database/postgresql/postgresql_privs.py b/database/postgresql/postgresql_privs.py index 9b9d94923bc..22a565f6b65 100644 --- a/database/postgresql/postgresql_privs.py +++ b/database/postgresql/postgresql_privs.py @@ -474,10 +474,13 @@ class Connection(object): if obj_type == 'group': set_what = ','.join(pg_quote_identifier(i, 'role') for i in obj_ids) else: + # function types are already quoted above + if obj_type != 'function': + obj_ids = [pg_quote_identifier(i, 'table') for i in obj_ids] # Note: obj_type has been checked against a set of string literals # and privs was escaped when it was parsed set_what = '%s ON %s %s' % (','.join(privs), obj_type, - ','.join(pg_quote_identifier(i, 'table') for i in obj_ids)) + ','.join(obj_ids)) # for_whom: SQL-fragment specifying for whom to set the above if roles == 'PUBLIC': From 711005fe9b61b3ba533928ba68203a08eb53d2a4 Mon Sep 17 00:00:00 2001 From: Conrado Buhrer Date: Mon, 26 Jan 2015 15:36:35 -0200 Subject: [PATCH 117/236] changed: state=build-dep; refactored into install() --- packaging/os/apt.py | 78 +++++++++++---------------------------------- 1 file changed, 19 insertions(+), 59 deletions(-) diff --git a/packaging/os/apt.py b/packaging/os/apt.py index 02576ef1a74..b629a762c1f 100644 --- a/packaging/os/apt.py +++ 
b/packaging/os/apt.py @@ -34,10 +34,10 @@ options: default: null state: description: - - Indicates the desired package state. C(latest) ensures that the latest version is installed. + - Indicates the desired package state. C(latest) ensures that the latest version is installed. C(build-dep) ensures the package build dependencies are installed. required: false default: present - choices: [ "latest", "absent", "present" ] + choices: [ "latest", "absent", "present", "build-dep" ] update_cache: description: - Run the equivalent of C(apt-get update) before the operation. Can be run as part of the package installation or as a separate step. @@ -82,12 +82,6 @@ options: required: false default: "yes" choices: [ "yes", "safe", "full", "dist"] - build_dep: - description: - - Instead, install the build dependencies for the named pkg (equivalent to 'apt-get build-dep foo') - required: false - default: "no" - choises: [ "yes", "no" ] dpkg_options: description: - Add dpkg options to apt command. Defaults to '-o "Dpkg::Options::=--force-confdef" -o "Dpkg::Options::=--force-confold"' @@ -141,7 +135,7 @@ EXAMPLES = ''' - apt: deb=/tmp/mypackage.deb # Install the build dependencies for package "foo" -- apt: pkg=foo build_dep=yes +- apt: pkg=foo state=build-dep ''' @@ -308,13 +302,18 @@ def expand_pkgspec_from_fnmatches(m, pkgspec, cache): def install(m, pkgspec, cache, upgrade=False, default_release=None, install_recommends=True, force=False, - dpkg_options=expand_dpkg_options(DPKG_OPTIONS)): + dpkg_options=expand_dpkg_options(DPKG_OPTIONS), + build_dep=False): pkg_list = [] packages = "" pkgspec = expand_pkgspec_from_fnmatches(m, pkgspec, cache) for package in pkgspec: name, version = package_split(package) installed, upgradable, has_files = package_status(m, name, version, cache, state='install') + if build_dep: + # Let apt decide what to install + pkg_list.append("'%s'" % package) + continue if not installed or (upgrade and upgradable): pkg_list.append("'%s'" % package) if 
installed and upgradable and version: @@ -341,7 +340,10 @@ def install(m, pkgspec, cache, upgrade=False, default_release=None, for (k,v) in APT_ENV_VARS.iteritems(): os.environ[k] = v - cmd = "%s -y %s %s %s install %s" % (APT_GET_CMD, dpkg_options, force_yes, check_arg, packages) + if build_dep: + cmd = "%s -y %s %s %s build-dep %s" % (APT_GET_CMD, dpkg_options, force_yes, check_arg, packages) + else: + cmd = "%s -y %s %s %s install %s" % (APT_GET_CMD, dpkg_options, force_yes, check_arg, packages) if default_release: cmd += " -t '%s'" % (default_release,) @@ -350,7 +352,7 @@ def install(m, pkgspec, cache, upgrade=False, default_release=None, rc, out, err = m.run_command(cmd) if rc: - return (False, dict(msg="'apt-get install %s' failed: %s" % (packages, err), stdout=out, stderr=err)) + return (False, dict(msg="'%s' failed: %s" % (cmd, err), stdout=out, stderr=err)) else: return (True, dict(changed=True, stdout=out, stderr=err)) else: @@ -493,41 +495,10 @@ def upgrade(m, mode="yes", force=False, default_release=None, m.exit_json(changed=False, msg=out, stdout=out, stderr=err) m.exit_json(changed=True, msg=out, stdout=out, stderr=err) -def build_dep(m, pkgspec, cache, force=False, - dpkg_options=expand_dpkg_options(DPKG_OPTIONS)): - if m.check_mode: - check_arg = '--simulate' - else: - check_arg = '' - - if force: - force_yes = '--force-yes' - else: - force_yes = '' - - packages = '' - pkgspec = expand_pkgspec_from_fnmatches(m, pkgspec, cache) - for package in pkgspec: - name, version = package_split(package) - packages += "'%s' " % package - if len(packages) == 0: - m.exit_json(changed=False) - else: - for (k,v) in APT_ENV_VARS.iteritems(): - os.environ[k] = v - apt_cmd_path = m.get_bin_path(APT_GET_CMD, required=True) - cmd = '%s -y %s %s %s build-dep %s' % (apt_cmd_path, dpkg_options, force_yes, check_arg, packages) - rc, out, err = m.run_command(cmd) - if rc: - m.fail_json(msg="'%s' failed: %s" % (cmd, err), stdout=out) - if APT_GET_ZERO in out: - 
m.exit_json(changed=False, msg=out, stdout=out, stderr=err) - m.exit_json(changed=True, msg=out, stdout=out, stderr=err) - def main(): module = AnsibleModule( argument_spec = dict( - state = dict(default='present', choices=['installed', 'latest', 'removed', 'absent', 'present']), + state = dict(default='present', choices=['installed', 'latest', 'removed', 'absent', 'present', 'build-dep']), update_cache = dict(default=False, aliases=['update-cache'], type='bool'), cache_valid_time = dict(type='int'), purge = dict(default=False, type='bool'), @@ -537,7 +508,6 @@ def main(): install_recommends = dict(default='yes', aliases=['install-recommends'], type='bool'), force = dict(default='no', type='bool'), upgrade = dict(choices=['yes', 'safe', 'full', 'dist']), - build_dep = dict(default='no', type='bool'), dpkg_options = dict(default=DPKG_OPTIONS) ), mutually_exclusive = [['package', 'upgrade', 'deb']], @@ -636,23 +606,13 @@ def main(): if latest and '=' in package: module.fail_json(msg='version number inconsistent with state=latest: %s' % package) - if p['build_dep']: - build_dep(module, packages, cache, force=force_yes, dpkg_options=dpkg_options) - - if p['state'] == 'latest': + if p['state'] in ('latest', 'present', 'build-dep'): + build_dep = p.get('build-dep', False) result = install(module, packages, cache, upgrade=True, default_release=p['default_release'], install_recommends=install_recommends, - force=force_yes, dpkg_options=dpkg_options) - (success, retvals) = result - if success: - module.exit_json(**retvals) - else: - module.fail_json(**retvals) - elif p['state'] == 'present': - result = install(module, packages, cache, default_release=p['default_release'], - install_recommends=install_recommends,force=force_yes, - dpkg_options=dpkg_options) + force=force_yes, dpkg_options=dpkg_options, + build_dep=build_dep) (success, retvals) = result if success: module.exit_json(**retvals) From 99a8caf40943ac6b8ea0cce0488d9d3e1d929287 Mon Sep 17 00:00:00 2001 From: Toshio 
Kuratomi Date: Mon, 26 Jan 2015 09:45:59 -0800 Subject: [PATCH 118/236] Prefer gtar to tar Fixes #702 --- files/unarchive.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/files/unarchive.py b/files/unarchive.py index fc2db0e6907..8e9c90fedcc 100644 --- a/files/unarchive.py +++ b/files/unarchive.py @@ -129,7 +129,11 @@ class TgzArchive(object): self.src = src self.dest = dest self.module = module - self.cmd_path = self.module.get_bin_path('tar') + # Prefer gtar (GNU tar) as it supports the compression options -zjJ + self.cmd_path = self.module.get_bin_path('gtar', None) + if not self.cmd_path: + # Fallback to tar + self.cmd_path = self.module.get_bin_path('tar') self.zipflag = 'z' self._files_in_archive = [] From ef3f486f6e88ddb2770789c3c8a793042d5bbaec Mon Sep 17 00:00:00 2001 From: Conrado Buhrer Date: Mon, 26 Jan 2015 17:32:34 -0200 Subject: [PATCH 119/236] fixed: removed build-deb from required_one_of --- packaging/os/apt.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/os/apt.py b/packaging/os/apt.py index b629a762c1f..e00ec4b98d7 100644 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -511,7 +511,7 @@ def main(): dpkg_options = dict(default=DPKG_OPTIONS) ), mutually_exclusive = [['package', 'upgrade', 'deb']], - required_one_of = [['package', 'upgrade', 'update_cache', 'build-dep', 'deb']], + required_one_of = [['package', 'upgrade', 'update_cache', 'deb']], supports_check_mode = True ) From 3155656eef66d94855b8548a682250a64ae0710e Mon Sep 17 00:00:00 2001 From: Conrado Buhrer Date: Mon, 26 Jan 2015 17:48:03 -0200 Subject: [PATCH 120/236] fixed: missing check for upgrade flag --- packaging/os/apt.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/packaging/os/apt.py b/packaging/os/apt.py index e00ec4b98d7..93809602afa 100644 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -608,7 +608,8 @@ def main(): if p['state'] in ('latest', 'present', 'build-dep'): build_dep = 
p.get('build-dep', False) - result = install(module, packages, cache, upgrade=True, + upgrade = p.get('latest', False) + result = install(module, packages, cache, upgrade=upgrade, default_release=p['default_release'], install_recommends=install_recommends, force=force_yes, dpkg_options=dpkg_options, From a5114bfa398ec5c1c39f3c36dcf9368075d5a312 Mon Sep 17 00:00:00 2001 From: Conrado Buhrer Date: Mon, 26 Jan 2015 18:16:42 -0200 Subject: [PATCH 121/236] fixed: erroneous logic --- packaging/os/apt.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/packaging/os/apt.py b/packaging/os/apt.py index 93809602afa..90d1716e2ca 100644 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -607,8 +607,12 @@ def main(): module.fail_json(msg='version number inconsistent with state=latest: %s' % package) if p['state'] in ('latest', 'present', 'build-dep'): - build_dep = p.get('build-dep', False) - upgrade = p.get('latest', False) + upgrade = False + build_dep = False + if p['state'] == 'latest': + upgrade = True + if p['state'] == 'build-dep': + build_dep = True result = install(module, packages, cache, upgrade=upgrade, default_release=p['default_release'], install_recommends=install_recommends, From bd208da8ea9a386f3f0581332216e867d0bc7a3b Mon Sep 17 00:00:00 2001 From: Conrado Buhrer Date: Tue, 27 Jan 2015 13:28:56 -0200 Subject: [PATCH 122/236] fixed: namespace clash #710 --- packaging/os/apt.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/packaging/os/apt.py b/packaging/os/apt.py index 90d1716e2ca..06f96f3f1d5 100644 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -607,17 +607,17 @@ def main(): module.fail_json(msg='version number inconsistent with state=latest: %s' % package) if p['state'] in ('latest', 'present', 'build-dep'): - upgrade = False - build_dep = False + state_upgrade = False + state_builddep = False if p['state'] == 'latest': - upgrade = True + state_upgrade = True if p['state'] == 'build-dep': - 
build_dep = True - result = install(module, packages, cache, upgrade=upgrade, + state_builddep = True + result = install(module, packages, cache, upgrade=state_upgrade, default_release=p['default_release'], install_recommends=install_recommends, force=force_yes, dpkg_options=dpkg_options, - build_dep=build_dep) + build_dep=state_builddep) (success, retvals) = result if success: module.exit_json(**retvals) From 98f0c0424aff0e333d52b60edf636b934ededcf9 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 27 Jan 2015 09:42:38 -0800 Subject: [PATCH 123/236] Remove str.format() usage for python2.4 compat. Fixes #10036 --- commands/command.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/commands/command.py b/commands/command.py index 2b79b327d71..131fc4c7ffc 100644 --- a/commands/command.py +++ b/commands/command.py @@ -127,9 +127,8 @@ OPTIONS = {'chdir': None, # the line is reached OPTIONS_REGEX = '|'.join(OPTIONS.keys()) PARAM_REGEX = re.compile( - r'(^|\s)({options_regex})=(?P[\'"])?(.*?)(?(quote)(?[\'"])?(.*?)(?(quote)(? 
Date: Tue, 27 Jan 2015 15:06:55 -0600 Subject: [PATCH 124/236] Added documentation for using url as key source --- system/authorized_key.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/system/authorized_key.py b/system/authorized_key.py index 898f74b575b..c5d19521813 100644 --- a/system/authorized_key.py +++ b/system/authorized_key.py @@ -37,7 +37,7 @@ options: aliases: [] key: description: - - The SSH public key, as a string + - The SSH public key(s), as a string or url (https://github.com/username.keys) required: true default: null path: @@ -79,6 +79,9 @@ EXAMPLES = ''' # Example using key data from a local file on the management machine - authorized_key: user=charlie key="{{ lookup('file', '/home/charlie/.ssh/id_rsa.pub') }}" +# Using github url as key source +- authorized_key: user=charlie key=https://github.com/charlie.keys + # Using alternate directory locations: - authorized_key: user=charlie key="{{ lookup('file', '/home/charlie/.ssh/id_rsa.pub') }}" @@ -97,6 +100,7 @@ EXAMPLES = ''' - authorized_key: user=charlie key="{{ lookup('file', '/home/charlie/.ssh/id_rsa.pub') }}" key_options='no-port-forwarding,host="10.0.1.1"' + ''' # Makes sure the public key line is present or absent in the user's .ssh/authorized_keys. From 543c45a15f84d36aa168674d417d2b916c14180e Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 28 Jan 2015 09:22:32 -0500 Subject: [PATCH 125/236] implemented account expiration as flag and not state, removed from unsupported platforms --- system/user.py | 98 ++++++++++++++++++++++---------------------------- 1 file changed, 42 insertions(+), 56 deletions(-) diff --git a/system/user.py b/system/user.py index e05ef3b9db0..804ff5b9bd6 100644 --- a/system/user.py +++ b/system/user.py @@ -84,12 +84,9 @@ options: state: required: false default: "present" - choices: [ present, absent, expired ] + choices: [ present, absent ] description: - - Whether the account should exist, and whether it is expired. 
- When C(absent), removes the user account. - When C(expired), the user will not be able to login through any means. - Expired state is only implemented for Linux. + - Whether the account should exist or not, taking action if the state is different from what is stated. createhome: required: false default: "yes" @@ -97,7 +94,7 @@ options: description: - Unless set to C(no), a home directory will be made for the user when the account is created or if the home directory does not - exist. + exist. move_home: required: false default: "no" @@ -180,6 +177,13 @@ options: version_added: "1.3" description: - C(always) will update passwords if they differ. C(on_create) will only set the password for newly created users. + expires: + version_added: "1.9" + required: false + default: "None" + description: + - An expiry time for the user in epoch, it will be ignored on platforms that do not support this. + Currently supported on Linux and FreeBSD. ''' EXAMPLES = ''' @@ -194,6 +198,9 @@ EXAMPLES = ''' # Create a 2048-bit SSH key for user jsmith in ~jsmith/.ssh/id_rsa - user: name=jsmith generate_ssh_key=yes ssh_key_bits=2048 ssh_key_file=.ssh/id_rsa + +# added a consultant who's account you want to expire +- user: name=james18 shell=/bin/zsh groups=developers expires=1422403387 ''' import os @@ -202,6 +209,7 @@ import grp import syslog import platform import socket +import time try: import spwd @@ -229,6 +237,7 @@ class User(object): platform = 'Generic' distribution = None SHADOWFILE = '/etc/shadow' + DATE_FORMAT = '%Y-%M-%d' def __new__(cls, *args, **kwargs): return load_platform_subclass(User, args, kwargs) @@ -258,6 +267,14 @@ class User(object): self.ssh_comment = module.params['ssh_key_comment'] self.ssh_passphrase = module.params['ssh_key_passphrase'] self.update_password = module.params['update_password'] + self.expires = None + + if module.params['expires']: + try: + self.expires = time.gmtime(module.params['expires']) + except Exception,e: + module.fail_json("Invalid 
expires time %s: %s" %(self.expires, str(e))) + if module.params['ssh_key_file'] is not None: self.ssh_file = module.params['ssh_key_file'] else: @@ -266,6 +283,7 @@ class User(object): # select whether we dump additional debug info through syslog self.syslogging = False + def execute_command(self, cmd, use_unsafe_shell=False, data=None): if self.syslogging: syslog.openlog('ansible-%s' % os.path.basename(__file__)) @@ -330,9 +348,9 @@ class User(object): cmd.append('-s') cmd.append(self.shell) - if self.state == 'expired': + if self.expires: cmd.append('--expiredate') - cmd.append('1') + cmd.append(time.strftime(self.DATE_FORMAT, self.expires)) if self.password is not None: cmd.append('-p') @@ -440,9 +458,9 @@ class User(object): cmd.append('-s') cmd.append(self.shell) - if self.state == 'expired': + if self.expires: cmd.append('--expiredate') - cmd.append('1') + cmd.append(time.strftime(self.DATE_FORMAT, self.expires)) if self.update_password == 'always' and self.password is not None and info[1] != self.password: cmd.append('-p') @@ -548,7 +566,7 @@ class User(object): if not os.path.exists(info[5]): return (1, '', 'User %s home directory does not exist' % self.name) ssh_key_file = self.get_ssh_key_path() - ssh_dir = os.path.dirname(ssh_key_file) + ssh_dir = os.path.dirname(ssh_key_file) if not os.path.exists(ssh_dir): try: os.mkdir(ssh_dir, 0700) @@ -637,7 +655,7 @@ class User(object): os.chown(os.path.join(root, f), uid, gid) except OSError, e: self.module.exit_json(failed=True, msg="%s" % e) - + # =========================================== @@ -714,9 +732,10 @@ class FreeBsdUser(User): cmd.append('-L') cmd.append(self.login_class) - if self.state == 'expired': + if self.expires: + days =( time.mktime(self.expires) - time.time() ) / 86400 cmd.append('-e') - cmd.append('1970-01-01') + cmd.append(str(int(days))) # system cannot be handled currently - should we error if its requested? 
# create the user @@ -730,7 +749,7 @@ class FreeBsdUser(User): self.module.get_bin_path('chpass', True), '-p', self.password, - self.name + self.name ] return self.execute_command(cmd) @@ -741,7 +760,7 @@ class FreeBsdUser(User): self.module.get_bin_path('pw', True), 'usermod', '-n', - self.name + self.name ] cmd_len = len(cmd) info = self.user_info() @@ -802,9 +821,10 @@ class FreeBsdUser(User): new_groups = groups | set(current_groups) cmd.append(','.join(new_groups)) - if self.state == 'expired': + if self.expires: + days = ( time.mktime(self.expires) - time.time() ) / 86400 cmd.append('-e') - cmd.append('1970-01-01') + cmd.append(str(int(days))) # modify the user if cmd will do anything if cmd_len != len(cmd): @@ -882,10 +902,6 @@ class OpenBSDUser(User): cmd.append('-L') cmd.append(self.login_class) - if self.state == 'expired': - cmd.append('-e') - cmd.append('1') - if self.password is not None: cmd.append('-p') cmd.append(self.password) @@ -980,10 +996,6 @@ class OpenBSDUser(User): cmd.append('-L') cmd.append(self.login_class) - if self.state == 'expired': - cmd.append('-e') - cmd.append('1') - if self.update_password == 'always' and self.password is not None and info[1] != self.password: cmd.append('-p') cmd.append(self.password) @@ -1057,10 +1069,6 @@ class NetBSDUser(User): cmd.append('-L') cmd.append(self.login_class) - if self.state == 'expired': - cmd.append('-e') - cmd.append('1') - if self.password is not None: cmd.append('-p') cmd.append(self.password) @@ -1143,10 +1151,6 @@ class NetBSDUser(User): cmd.append('-L') cmd.append(self.login_class) - if self.state == 'expired': - cmd.append('-e') - cmd.append('1') - if self.update_password == 'always' and self.password is not None and info[1] != self.password: cmd.append('-p') cmd.append(self.password) @@ -1224,10 +1228,6 @@ class SunOS(User): if self.createhome: cmd.append('-m') - if self.state == 'expired': - cmd.append('-e') - cmd.append('1/1/70') - cmd.append(self.name) if self.module.check_mode: @@ 
-1312,10 +1312,6 @@ class SunOS(User): cmd.append('-s') cmd.append(self.shell) - if self.state == 'expired': - cmd.append('-e') - cmd.append('1/1/70') - if self.module.check_mode: return (0, '', '') else: @@ -1405,10 +1401,6 @@ class AIX(User): if self.createhome: cmd.append('-m') - if self.state == 'expired': - cmd.append('-e') - cmd.append('0101000070') - cmd.append(self.name) (rc, out, err) = self.execute_command(cmd) @@ -1477,10 +1469,6 @@ class AIX(User): cmd.append('-s') cmd.append(self.shell) - if self.state == 'expired': - cmd.append('-e') - cmd.append('0101000070') - # skip if no changes to be made if len(cmd) == 1: (rc, out, err) = (None, '', '') @@ -1516,7 +1504,7 @@ def main(): } module = AnsibleModule( argument_spec = dict( - state=dict(default='present', choices=['present', 'absent', 'expired'], type='str'), + state=dict(default='present', choices=['present', 'absent'], type='str'), name=dict(required=True, aliases=['user'], type='str'), uid=dict(default=None, type='str'), non_unique=dict(default='no', type='bool'), @@ -1543,7 +1531,8 @@ def main(): ssh_key_file=dict(default=None, type='str'), ssh_key_comment=dict(default=ssh_defaults['comment'], type='str'), ssh_key_passphrase=dict(default=None, type='str'), - update_password=dict(default='always',choices=['always','on_create'],type='str') + update_password=dict(default='always',choices=['always','on_create'],type='str'), + expires=dict(default=None, type='float'), ), supports_check_mode=True ) @@ -1571,10 +1560,7 @@ def main(): module.fail_json(name=user.name, msg=err, rc=rc) result['force'] = user.force result['remove'] = user.remove - elif user.state == 'expired' and user.platform != 'Generic': - module.fail_json(name=user.state, - msg='expired state not yet support for {0} platform'.format(user.platform)) - elif user.state == 'present' or user.state == 'expired': + elif user.state == 'present': if not user.user_exists(): if module.check_mode: module.exit_json(changed=True) From 
7b40f63a4da4696892e905372e3f175862d47006 Mon Sep 17 00:00:00 2001 From: AlejandroF Date: Wed, 28 Jan 2015 11:29:29 -0300 Subject: [PATCH 126/236] Example added How to define specific version of the package we need... --- packaging/os/yum.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/packaging/os/yum.py b/packaging/os/yum.py index 65d5b43b07c..744fc1179fe 100644 --- a/packaging/os/yum.py +++ b/packaging/os/yum.py @@ -122,6 +122,9 @@ EXAMPLES = ''' - name: install the latest version of Apache from the testing repo yum: name=httpd enablerepo=testing state=present +- name: install one specific version of Apache + yum: name=httpd-2.2.29-1.4.amzn1 state=present + - name: upgrade all packages yum: name=* state=latest From a28ad0db83a1591b33d763054a5fbac2984eae8f Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 28 Jan 2015 12:35:31 -0500 Subject: [PATCH 127/236] minor fixes on new rds refactor --- cloud/amazon/rds.py | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/cloud/amazon/rds.py b/cloud/amazon/rds.py index 96e4848e056..7f17c1ee1bd 100644 --- a/cloud/amazon/rds.py +++ b/cloud/amazon/rds.py @@ -251,7 +251,7 @@ EXAMPLES = ''' # Basic mysql provisioning example - rds: command: create - instance_name: new_database + instance_name: new-database db_engine: MySQL size: 10 instance_type: db.m1.small @@ -262,9 +262,9 @@ EXAMPLES = ''' Application: cms # Create a read-only replica and wait for it to become available -- rds: +- rds: command: replicate - instance_name: new_database_replica + instance_name: new-database-replica source_instance: new_database wait: yes wait_timeout: 600 @@ -272,20 +272,20 @@ EXAMPLES = ''' # Delete an instance, but create a snapshot before doing so - rds: command: delete - instance_name: new_database + instance_name: new-database snapshot: new_database_snapshot # Get facts about an instance - rds: command: facts - instance_name: new_database + instance_name: new-database register: 
new_database_facts # Rename an instance and wait for the change to take effect - rds: command: modify - instance_name: new_database - new_instance_name: renamed_database + instance_name: new-database + new_instance_name: renamed-database wait: yes ''' @@ -392,7 +392,7 @@ class RDSConnection: def promote_read_replica(self, instance_name, **params): try: result = self.connection.promote_read_replica(instance_name, **params) - return RDSInstance(result) + return RDSDBInstance(result) except boto.exception.BotoServerError, e: raise RDSException(e) @@ -652,7 +652,7 @@ def create_db_instance(module, conn): module.params.get('username'), module.params.get('password'), **params) changed = True except RDSException, e: - module.fail_json(msg=e.message) + module.fail_json(msg="failed to create instance: %s" % e.message) if module.params.get('wait'): resource = await_resource(conn, result, 'available', module) @@ -679,7 +679,7 @@ def replicate_db_instance(module, conn): result = conn.create_db_instance_read_replica(instance_name, source_instance, **params) changed = True except RDSException, e: - module.fail_json(msg=e.message) + module.fail_json(msg="failed to create replica instance: %s " % e.message) if module.params.get('wait'): resource = await_resource(conn, result, 'available', module) @@ -715,7 +715,7 @@ def delete_db_instance_or_snapshot(module, conn): else: result = conn.delete_db_snapshot(snapshot) except RDSException, e: - module.fail_json(msg=e.message) + module.fail_json(msg="failed to delete instance: %s" % e.message) # If we're not waiting for a delete to complete then we're all done # so just return From e2083bbe8a6baf63a363617f5b4590a3a8ff6e89 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 28 Jan 2015 12:55:05 -0500 Subject: [PATCH 128/236] corrected version added for latest features --- cloud/amazon/rds.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cloud/amazon/rds.py b/cloud/amazon/rds.py index 7f17c1ee1bd..879143c03f6 
100644 --- a/cloud/amazon/rds.py +++ b/cloud/amazon/rds.py @@ -226,21 +226,21 @@ options: required: false default: null aliases: [] - version_added: 1.8 + version_added: 1.9 publicly_accessible: description: - explicitly set whether the resource should be publicly accessible or not. Used with command=create, command=replicate. Requires boto >= 2.26.0 required: false default: null aliases: [] - version_added: 1.8 + version_added: 1.9 tags: description: - tags dict to apply to a resource. Used with command=create, command=replicate, command=restore. Requires boto >= 2.26.0 required: false default: null aliases: [] - version_added: 1.8 + version_added: 1.9 requirements: [ "boto" ] author: Bruce Pennypacker, Will Thames ''' From 6350e6a4cc8f01294c1acd00d8072b50026a63fe Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 28 Jan 2015 16:19:00 -0800 Subject: [PATCH 129/236] Update force documentation to reflect default=no --- source_control/subversion.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source_control/subversion.py b/source_control/subversion.py index 8f6d81e5694..052afd9bb35 100644 --- a/source_control/subversion.py +++ b/source_control/subversion.py @@ -51,7 +51,7 @@ options: description: - If C(yes), modified files will be discarded. If C(no), module will fail if it encounters modified files. required: false - default: "yes" + default: "no" choices: [ "yes", "no" ] username: description: From e2c1a0d2dd7ba307cc8005c50e4d6138c8c7477f Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 28 Jan 2015 16:25:56 -0800 Subject: [PATCH 130/236] Make documentation list when force changed defaults --- source_control/subversion.py | 1 + 1 file changed, 1 insertion(+) diff --git a/source_control/subversion.py b/source_control/subversion.py index 052afd9bb35..f4a0f65fd78 100644 --- a/source_control/subversion.py +++ b/source_control/subversion.py @@ -50,6 +50,7 @@ options: force: description: - If C(yes), modified files will be discarded. 
If C(no), module will fail if it encounters modified files. + Prior to 1.9 the default was `yes`. required: false default: "no" choices: [ "yes", "no" ] From 256ce9dd4dfbe2b0dc9eb5031812c8ab76418a22 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 28 Jan 2015 16:26:44 -0800 Subject: [PATCH 131/236] Change the git force parameter to default to no as a safety change. Fixes #306 --- source_control/git.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/source_control/git.py b/source_control/git.py index 44ebf06487a..0cb87304a92 100644 --- a/source_control/git.py +++ b/source_control/git.py @@ -93,13 +93,14 @@ options: An example value could be "refs/meta/config". force: required: false - default: "yes" + default: "no" choices: [ "yes", "no" ] version_added: "0.7" description: - If C(yes), any modified files in the working repository will be discarded. Prior to 0.7, this was always - 'yes' and could not be disabled. + 'yes' and could not be disabled. Prior to 1.9, the default was + `yes` depth: required: false default: null @@ -611,7 +612,7 @@ def main(): remote=dict(default='origin'), refspec=dict(default=None), reference=dict(default=None), - force=dict(default='yes', type='bool'), + force=dict(default='no', type='bool'), depth=dict(default=None, type='int'), clone=dict(default='yes', type='bool'), update=dict(default='yes', type='bool'), From 0c49d9f6c65b0493af83cfec8418c61893c50217 Mon Sep 17 00:00:00 2001 From: Peter Mooshammer Date: Thu, 29 Jan 2015 10:19:01 -0800 Subject: [PATCH 132/236] adding disk_type option to choose from pd-ssd or pd-standard --- cloud/google/gce_pd.py | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/cloud/google/gce_pd.py b/cloud/google/gce_pd.py index ddfe711304e..1847f0eeb93 100644 --- a/cloud/google/gce_pd.py +++ b/cloud/google/gce_pd.py @@ -108,6 +108,14 @@ options: required: false default: null aliases: [] + disk_type: + version_added: "1.9" + description: + - type of 
disk provisioned + required: false + default: "pd-standard" + choices: ["pd-standard", "pd-ssd"] + aliases: [] requirements: [ "libcloud" ] author: Eric Johnson @@ -144,6 +152,7 @@ def main(): mode = dict(default='READ_ONLY', choices=['READ_WRITE', 'READ_ONLY']), name = dict(required=True), size_gb = dict(default=10), + disk_type = dict(default='pd-standard'), image = dict(), snapshot = dict(), state = dict(default='present'), @@ -161,6 +170,7 @@ def main(): mode = module.params.get('mode') name = module.params.get('name') size_gb = module.params.get('size_gb') + disk_type = module.params.get('disk_type') image = module.params.get('image') snapshot = module.params.get('snapshot') state = module.params.get('state') @@ -174,7 +184,7 @@ def main(): disk = inst = None changed = is_attached = False - json_output = { 'name': name, 'zone': zone, 'state': state } + json_output = { 'name': name, 'zone': zone, 'state': state, 'disk_type': disk_type } if detach_only: json_output['detach_only'] = True json_output['detached_from_instance'] = instance_name @@ -233,7 +243,7 @@ def main(): try: disk = gce.create_volume( size_gb, name, location=zone, image=lc_image, - snapshot=lc_snapshot) + snapshot=lc_snapshot, ex_disk_type=disk_type) except ResourceExistsError: pass except QuotaExceededError: From be744ce5e78f217eb34c4d369847f1e174da9ae6 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 29 Jan 2015 18:17:48 -0800 Subject: [PATCH 133/236] Reverse the force parameter for the hg module --- source_control/hg.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/source_control/hg.py b/source_control/hg.py index c2bd0d9d953..d83215fabe1 100644 --- a/source_control/hg.py +++ b/source_control/hg.py @@ -54,9 +54,10 @@ options: aliases: [ version ] force: description: - - Discards uncommitted changes. Runs C(hg update -C). + - Discards uncommitted changes. Runs C(hg update -C). Prior to + 1.9, the default was `yes`. 
required: false - default: "yes" + default: "no" choices: [ "yes", "no" ] purge: description: @@ -207,7 +208,7 @@ def main(): repo = dict(required=True, aliases=['name']), dest = dict(required=True), revision = dict(default=None, aliases=['version']), - force = dict(default='yes', type='bool'), + force = dict(default='no', type='bool'), purge = dict(default='no', type='bool'), executable = dict(default=None), ), From 7baaf97a8b1dbf1c8b3652d12c2d0b3bb3a6e245 Mon Sep 17 00:00:00 2001 From: Sergey Zhukov Date: Fri, 30 Jan 2015 19:58:12 +0300 Subject: [PATCH 134/236] Fixed broken volumes_from for client API >= 1.10 --- cloud/docker/docker.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 1957c2d4db0..027d5c84bcc 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -731,6 +731,8 @@ class DockerManager(object): 'dns': self.module.params.get('dns'), 'volumes_from': self.module.params.get('volumes_from'), } + if docker.utils.compare_version('1.10', self.client.version()['ApiVersion']) >= 0: + params['volumes_from'] = "" if params['dns'] is not None: self.ensure_capability('dns') From 27c046ae792be84398647e49751dce00b7981293 Mon Sep 17 00:00:00 2001 From: Jeff Gonzalez Date: Tue, 3 Feb 2015 19:08:23 -0600 Subject: [PATCH 135/236] Refactored code to use module utility fetch_url function. 
--- system/authorized_key.py | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/system/authorized_key.py b/system/authorized_key.py index c5d19521813..e1ac18a701d 100644 --- a/system/authorized_key.py +++ b/system/authorized_key.py @@ -122,7 +122,6 @@ import os.path import tempfile import re import shlex -import urllib2 class keydict(dict): @@ -337,19 +336,22 @@ def enforce_state(module, params): manage_dir = params.get("manage_dir", True) state = params.get("state", "present") key_options = params.get("key_options", None) + error_msg = "Error getting key from: %s" + # if the key is a url, request it and use it as key source if key.startswith("http"): - try: - gh_key = urllib2.urlopen(key).read() - except urllib2.URLError, e: - module.fail_json(msg="no key found at: %s" % key) - - key = gh_key + try: + resp, info = fetch_url(module, key) + if info['status'] != 200: + module.fail_json(msg=error_msg % key) + else: + key = resp.read() + except Exception: + module.fail_json(msg=error_msg % key) # extract individual keys into an array, skipping blank lines and comments key = [s for s in key.splitlines() if s and not s.startswith('#')] - # check current state -- just get the filename, don't create file do_write = False params["keyfile"] = keyfile(module, user, do_write, path, manage_dir) @@ -431,4 +433,5 @@ def main(): # import module snippets from ansible.module_utils.basic import * +from ansible.module_utils.urls import * main() From b451cbd37b7e9b02fcd051efa919d7411cd2fa9c Mon Sep 17 00:00:00 2001 From: Jean-Baptiste Barth Date: Tue, 6 Jan 2015 17:10:11 +0100 Subject: [PATCH 136/236] EC2: move logic about terminated instances up (#423) As stated in #423, the commit 7f11c3d broke ec2 spot instance launching after 1.7.2. 
This is because it acts on the 'res' variable which has 2 different types in the method, and in case we request spot instances, the resulting object is not a result of ec2.run_instances() but ec2.request_spot_instances(). Actually this fix doesn't seem to be relevant in the spot instances case, because by construction we won't retrieve 'terminated' instances in the end. --- cloud/amazon/ec2.py | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) mode change 100644 => 100755 cloud/amazon/ec2.py diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py old mode 100644 new mode 100755 index 93b496cb5e8..1d58721bfe3 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -915,6 +915,17 @@ def create_instances(module, ec2, override_count=None): continue else: module.fail_json(msg = str(e)) + + # The instances returned through ec2.run_instances above can be in + # terminated state due to idempotency. See commit 7f11c3d for a complete + # explanation. + terminated_instances = [ str(instance.id) for instance in res.instances + if instance.state == 'terminated' ] + if terminated_instances: + module.fail_json(msg = "Instances with id(s) %s " % terminated_instances + + "were created previously but have since been terminated - " + + "use a (possibly different) 'instanceid' parameter") + else: if private_ip: module.fail_json( @@ -952,15 +963,6 @@ def create_instances(module, ec2, override_count=None): except boto.exception.BotoServerError, e: module.fail_json(msg = "Instance creation failed => %s: %s" % (e.error_code, e.error_message)) - # The instances returned through run_instances can be in
- terminated_instances = [ str(instance.id) for instance in res.instances - if instance.state == 'terminated' ] - if terminated_instances: - module.fail_json(msg = "Instances with id(s) %s " % terminated_instances + - "were created previously but have since been terminated - " + - "use a (possibly different) 'instanceid' parameter") - # wait here until the instances are up num_running = 0 wait_timeout = time.time() + wait_timeout From 6090c4e109995929ab175e7795868f1b5b882eb2 Mon Sep 17 00:00:00 2001 From: Jean-Baptiste Barth Date: Tue, 6 Jan 2015 17:34:57 +0100 Subject: [PATCH 137/236] Improve formatting after previous commit --- cloud/amazon/ec2.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index 1d58721bfe3..d34931c9914 100755 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -919,12 +919,13 @@ def create_instances(module, ec2, override_count=None): # The instances returned through ec2.run_instances above can be in # terminated state due to idempotency. See commit 7f11c3d for a complete # explanation. 
- terminated_instances = [ str(instance.id) for instance in res.instances - if instance.state == 'terminated' ] + terminated_instances = [ + str(instance.id) for instance in res.instances if instance.state == 'terminated' + ] if terminated_instances: module.fail_json(msg = "Instances with id(s) %s " % terminated_instances + - "were created previously but have since been terminated - " + - "use a (possibly different) 'instanceid' parameter") + "were created previously but have since been terminated - " + + "use a (possibly different) 'instanceid' parameter") else: if private_ip: From 17c7d9c5d5c8c880f59a9a0009d795ca7ff19225 Mon Sep 17 00:00:00 2001 From: Graham Hay Date: Wed, 1 Oct 2014 08:20:56 +0100 Subject: [PATCH 138/236] Use insserv where available --- system/service.py | 40 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 39 insertions(+), 1 deletion(-) diff --git a/system/service.py b/system/service.py index e1bd250a1ee..4cc839034f0 100644 --- a/system/service.py +++ b/system/service.py @@ -392,7 +392,7 @@ class LinuxService(Service): def get_service_tools(self): paths = [ '/sbin', '/usr/sbin', '/bin', '/usr/bin' ] - binaries = [ 'service', 'chkconfig', 'update-rc.d', 'rc-service', 'rc-update', 'initctl', 'systemctl', 'start', 'stop', 'restart' ] + binaries = [ 'service', 'chkconfig', 'update-rc.d', 'rc-service', 'rc-update', 'initctl', 'systemctl', 'start', 'stop', 'restart', 'insserv' ] initpaths = [ '/etc/init.d' ] location = dict() @@ -460,6 +460,9 @@ class LinuxService(Service): if location.get('update-rc.d', False): # and uses update-rc.d self.enable_cmd = location['update-rc.d'] + elif location.get('insserv', None): + # and uses insserv + self.enable_cmd = location['insserv'] elif location.get('chkconfig', False): # and uses chkconfig self.enable_cmd = location['chkconfig'] @@ -778,6 +781,41 @@ class LinuxService(Service): if not self.changed: return + # + # insserv (Debian 7) + # + if self.enable_cmd.endswith("insserv"): + if self.enable: + 
(rc, out, err) = self.execute_command("%s -n %s" % (self.enable_cmd, self.name)) + else: + (rc, out, err) = self.execute_command("%s -nr %s" % (self.enable_cmd, self.name)) + + self.changed = False + for line in err.splitlines(): + if self.enable and line.find('enable service') != -1: + self.changed = True + break + if not self.enable and line.find('remove service') != -1: + self.changed = True + break + + if self.module.check_mode: + self.module.exit_json(changed=self.changed) + + if not self.changed: + return + + if self.enable: + (rc, out, err) = self.execute_command("%s %s" % (self.enable_cmd, self.name)) + if (rc != 0) or (err != ''): + self.module.fail_json(msg=("Failed to install service. rc: %s, out: %s, err: %s" % (rc, out, err))) + return (rc, out, err) + else: + (rc, out, err) = self.execute_command("%s -r %s" % (self.enable_cmd, self.name)) + if (rc != 0) or (err != ''): + self.module.fail_json(msg=("Failed to remove service. rc: %s, out: %s, err: %s" % (rc, out, err))) + return (rc, out, err) + # # If we've gotten to the end, the service needs to be updated # From ac1a64e62fc77c1c52d3f86e9e0b661410586f4b Mon Sep 17 00:00:00 2001 From: Philippe ALEXANDRE Date: Fri, 6 Feb 2015 21:29:25 +0100 Subject: [PATCH 139/236] Add Linuxmint support in hostname module --- system/hostname.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/system/hostname.py b/system/hostname.py index f645a8cdfd3..3ec243af1f6 100644 --- a/system/hostname.py +++ b/system/hostname.py @@ -410,6 +410,11 @@ class UbuntuHostname(Hostname): distribution = 'Ubuntu' strategy_class = DebianStrategy +class LinuxmintHostname(Hostname): + platform = 'Linux' + distribution = 'Linuxmint' + strategy_class = DebianStrategy + class LinaroHostname(Hostname): platform = 'Linux' distribution = 'Linaro' From d92cd071563647d6058909260e5431d79ab564b1 Mon Sep 17 00:00:00 2001 From: Coderah Date: Fri, 6 Feb 2015 15:18:15 -0800 Subject: [PATCH 140/236] add hosted_zone information to ec2_elb_lb return --- 
cloud/amazon/ec2_elb_lb.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_elb_lb.py b/cloud/amazon/ec2_elb_lb.py index d83db113963..3aef02aa18b 100644 --- a/cloud/amazon/ec2_elb_lb.py +++ b/cloud/amazon/ec2_elb_lb.py @@ -328,7 +328,9 @@ class ElbManager(object): 'security_group_ids': check_elb.security_groups, 'status': self.status, 'subnets': self.subnets, - 'scheme': check_elb.scheme + 'scheme': check_elb.scheme, + 'hosted_zone_name': check_elb.canonical_hosted_zone_name, + 'hosted_zone_id': check_elb.canonical_hosted_zone_name_id } if check_elb.health_check: From aa2b1f033b98e709ef95fd99ec3621d1f794a9fd Mon Sep 17 00:00:00 2001 From: Coderah Date: Fri, 6 Feb 2015 15:19:11 -0800 Subject: [PATCH 141/236] add alias support to route53 module --- cloud/amazon/route53.py | 83 ++++++++++++++++++++++++++++++++--------- 1 file changed, 65 insertions(+), 18 deletions(-) diff --git a/cloud/amazon/route53.py b/cloud/amazon/route53.py index 7fbe8552f41..c5af963ce3e 100644 --- a/cloud/amazon/route53.py +++ b/cloud/amazon/route53.py @@ -54,9 +54,23 @@ options: default: null aliases: [] choices: [ 'A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'NS' ] + alias: + description: + - Indicates if this is an alias record. + required: false + version_added: 1.8 + default: False + aliases: [] + alias_hosted_zone_id: + description: + - The hosted zone identifier. + required: false + version_added: 1.8 + default: null + aliases: [] value: description: - - The new value when creating a DNS record. Multiple comma-spaced values are allowed. When deleting a record all values for the record must be specified or Route53 will not delete it. + - The new value when creating a DNS record. Multiple comma-spaced values are allowed for non-alias records. When deleting a record all values for the record must be specified or Route53 will not delete it. 
required: false default: null aliases: [] @@ -137,6 +151,16 @@ EXAMPLES = ''' ttl: "7200" value: '"bar"' +# Add an alias record that points to an Amazon ELB: +- route53: + command=create + zone=foo.com + record=elb.foo.com + type=A + value="{{ elb_dns_name }}" + alias=yes + alias_hosted_zone_id="{{ elb_zone_id }}" + ''' @@ -168,25 +192,28 @@ def commit(changes, retry_interval): def main(): argument_spec = ec2_argument_spec() argument_spec.update(dict( - command = dict(choices=['get', 'create', 'delete'], required=True), - zone = dict(required=True), - record = dict(required=True), - ttl = dict(required=False, default=3600), - type = dict(choices=['A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'NS'], required=True), - value = dict(required=False), - overwrite = dict(required=False, type='bool'), - retry_interval = dict(required=False, default=500) + command = dict(choices=['get', 'create', 'delete'], required=True), + zone = dict(required=True), + record = dict(required=True), + ttl = dict(required=False, default=3600), + type = dict(choices=['A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'NS'], required=True), + alias = dict(required=False, type='bool'), + alias_hosted_zone_id = dict(required=False), + value = dict(required=False), + overwrite = dict(required=False, type='bool'), + retry_interval = dict(required=False, default=500) ) ) module = AnsibleModule(argument_spec=argument_spec) - command_in = module.params.get('command') - zone_in = module.params.get('zone').lower() - ttl_in = module.params.get('ttl') - record_in = module.params.get('record').lower() - type_in = module.params.get('type') - value_in = module.params.get('value') - retry_interval_in = module.params.get('retry_interval') + command_in = module.params.get('command') + zone_in = module.params.get('zone').lower() + ttl_in = module.params.get('ttl') + record_in = module.params.get('record').lower() + type_in = module.params.get('type') + value_in = module.params.get('value') + 
alias_hosted_zone_id_in = module.params.get('alias_hosted_zone_id') + retry_interval_in = module.params.get('retry_interval') ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module) @@ -207,6 +234,11 @@ def main(): if command_in == 'create' or command_in == 'delete': if not value_in: module.fail_json(msg = "parameter 'value' required for create/delete") + elif module.params['alias']: + if len(value_list) != 1: + module.fail_json(msg = "parameter 'value' must contain a single dns name for alias create/delete") + elif not alias_hosted_zone_id_in: + module.fail_json(msg = "parameter 'alias_hosted_zone_id' required for alias create/delete") # connect to the route53 endpoint try: @@ -244,6 +276,15 @@ def main(): record['ttl'] = rset.ttl record['value'] = ','.join(sorted(rset.resource_records)) record['values'] = sorted(rset.resource_records) + if rset.alias_dns_name: + record['alias'] = True + record['value'] = rset.alias_dns_name + record['values'] = [rset.alias_dns_name] + record['alias_hosted_zone_id'] = rset.alias_hosted_zone_id + else: + record['alias'] = False + record['value'] = ','.join(sorted(rset.resource_records)) + record['values'] = sorted(rset.resource_records) if value_list == sorted(rset.resource_records) and int(record['ttl']) == ttl_in and command_in == 'create': module.exit_json(changed=False) @@ -261,12 +302,18 @@ def main(): else: change = changes.add_change("DELETE", record_in, type_in, record['ttl']) for v in record['values']: - change.add_value(v) + if record['alias']: + change.set_alias(record['alias_hosted_zone_id'], v) + else: + change.add_value(v) if command_in == 'create' or command_in == 'delete': change = changes.add_change(command_in.upper(), record_in, type_in, ttl_in) for v in value_list: - change.add_value(v) + if module.params['alias']: + change.set_alias(alias_hosted_zone_id_in, v) + else: + change.add_value(v) try: result = commit(changes, retry_interval_in) From b20b29bd1778b0bcf2da8947218b4ad62494e226 Mon Sep 17 
00:00:00 2001 From: Jon Hadfield Date: Sun, 8 Feb 2015 17:02:37 +0000 Subject: [PATCH 142/236] use is-enabled to check systemd service status. --- system/service.py | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/system/service.py b/system/service.py index e1bd250a1ee..2ea3d9c9583 100644 --- a/system/service.py +++ b/system/service.py @@ -478,6 +478,12 @@ class LinuxService(Service): if location.get('initctl', False): self.svc_initctl = location['initctl'] + def get_systemd_service_enabled(self): + (rc, out, err) = self.execute_command("%s is-enabled %s" % (self.enable_cmd, self.__systemd_unit,)) + if rc == 0: + return True + return False + def get_systemd_status_dict(self): (rc, out, err) = self.execute_command("%s show %s" % (self.enable_cmd, self.__systemd_unit,)) if rc != 0: @@ -692,12 +698,11 @@ class LinuxService(Service): action = 'disable' # Check if we're already in the correct state - d = self.get_systemd_status_dict() - if "UnitFileState" in d: - if self.enable and d["UnitFileState"] == "enabled": - self.changed = False - elif not self.enable and d["UnitFileState"] == "disabled": - self.changed = False + service_enabled = self.get_systemd_service_enabled() + if self.enable and service_enabled: + self.changed = False + elif not self.enable and not service_enabled: + self.changed = False elif not self.enable: self.changed = False From 5854f53cdcae5a80c54ec210ba04497d76ddd87c Mon Sep 17 00:00:00 2001 From: Capi Etheriel Date: Wed, 4 Feb 2015 18:20:11 -0200 Subject: [PATCH 143/236] add ec2 instance tags and groups info --- cloud/amazon/ec2.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index d34931c9914..630e665a277 100755 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -590,7 +590,9 @@ def get_instance_info(inst): 'root_device_type': inst.root_device_type, 'root_device_name': inst.root_device_name, 'state': inst.state, - 'hypervisor': 
inst.hypervisor} + 'hypervisor': inst.hypervisor, + 'tags': inst.tags, + 'groups': {group.id: group.name for group in inst.groups}} try: instance_info['virtualization_type'] = getattr(inst,'virtualization_type') except AttributeError: From e0c5b4340ddd9698c4321dca3edaf1a55bf3e3f2 Mon Sep 17 00:00:00 2001 From: Jesse Keating Date: Mon, 9 Feb 2015 14:03:20 -0800 Subject: [PATCH 144/236] Add exclusive option to authorized_keys This option allows the module to ensure that ONLY the specified keys exist in the authorized_keys file. All others will be removed. This is quite useful when rotating keys and ensuring no other key will be accepted. --- system/authorized_key.py | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/system/authorized_key.py b/system/authorized_key.py index e1ac18a701d..06d16da5ee7 100644 --- a/system/authorized_key.py +++ b/system/authorized_key.py @@ -70,6 +70,15 @@ options: required: false default: null version_added: "1.4" + exclusive: + description: + - Whether to remove all other non-specified keys from the + authorized_keys file. Multiple keys can be specified in a single + key= string value by separating them by newlines. + required: false + choices: [ "yes", "no" ] + default: "no" + version_added: "1.9" description: - "Adds or removes authorized keys for particular user accounts" author: Brad Olson @@ -101,6 +110,9 @@ EXAMPLES = ''' key="{{ lookup('file', '/home/charlie/.ssh/id_rsa.pub') }}" key_options='no-port-forwarding,host="10.0.1.1"' +# Set up authorized_keys exclusively with one key +- authorized_keys: user=root key=public_keys/doe-jane state=present + exclusive=yes ''' # Makes sure the public key line is present or absent in the user's .ssh/authorized_keys.
@@ -336,6 +348,7 @@ def enforce_state(module, params): manage_dir = params.get("manage_dir", True) state = params.get("state", "present") key_options = params.get("key_options", None) + exclusive = params.get("exclusive", False) error_msg = "Error getting key from: %s" # if the key is a url, request it and use it as key source @@ -357,6 +370,10 @@ def enforce_state(module, params): params["keyfile"] = keyfile(module, user, do_write, path, manage_dir) existing_keys = readkeys(module, params["keyfile"]) + # Add a place holder for keys that should exist in the state=present and + # exclusive=true case + keys_to_exist = [] + # Check our new keys, if any of them exist we'll continue. for new_key in key: parsed_new_key = parsekey(module, new_key) @@ -386,6 +403,7 @@ def enforce_state(module, params): # handle idempotent state=present if state=="present": + keys_to_exist.append(parsed_new_key[0]) if len(non_matching_keys) > 0: for non_matching_key in non_matching_keys: if non_matching_key[0] in existing_keys: @@ -402,6 +420,13 @@ def enforce_state(module, params): del existing_keys[parsed_new_key[0]] do_write = True + # remove all other keys to honor exclusive + if state == "present" and exclusive: + to_remove = frozenset(existing_keys).difference(keys_to_exist) + for key in to_remove: + del existing_keys[key] + do_write = True + if do_write: if module.check_mode: module.exit_json(changed=True) @@ -424,6 +449,7 @@ def main(): state = dict(default='present', choices=['absent','present']), key_options = dict(required=False, type='str'), unique = dict(default=False, type='bool'), + exclusive = dict(default=False, type='bool'), ), supports_check_mode=True ) From df53b85259dadc0054de653b4e20426acbfca710 Mon Sep 17 00:00:00 2001 From: Luc Bourlier Date: Fri, 6 Feb 2015 13:50:43 +0000 Subject: [PATCH 145/236] Don't use 'tenancy' in ec2 spot requests The problem was introduced in commit f5789e8e. 
'tenancy' is a parameter of ec2.run_instances, but not in ec2.request_spot_instances. So it was breaking the support for spot requests. --- cloud/amazon/ec2.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index d34931c9914..4d65fbf9841 100755 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -834,7 +834,8 @@ def create_instances(module, ec2, override_count=None): if ebs_optimized: params['ebs_optimized'] = ebs_optimized - if tenancy: + # 'tenancy' always has a default value, but it is not a valid parameter for spot instance request + if not spot_price: params['tenancy'] = tenancy if boto_supports_profile_name_arg(ec2): From ab8b4c0270dd156df71d6b5614f256b7316d78be Mon Sep 17 00:00:00 2001 From: moussa taifi Date: Tue, 10 Feb 2015 11:14:28 -0500 Subject: [PATCH 146/236] Adds the currently available instance types for azure to the available dict(choices..... --- cloud/azure/azure.py | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/cloud/azure/azure.py b/cloud/azure/azure.py index 1679fbc45d1..d0c883c2b2d 100644 --- a/cloud/azure/azure.py +++ b/cloud/azure/azure.py @@ -173,7 +173,20 @@ AZURE_ROLE_SIZES = ['ExtraSmall', 'Basic_A1', 'Basic_A2', 'Basic_A3', - 'Basic_A4'] + 'Basic_A4', + 'Standard_D1', + 'Standard_D2', + 'Standard_D3', + 'Standard_D4', + 'Standard_D11', + 'Standard_D12', + 'Standard_D13', + 'Standard_D14', + 'Standard_G1', + 'Standard_G2', + 'Sandard_G3', + 'Standard_G4', + 'Standard_G5'] try: import azure as windows_azure From 7e8a518592c522bb1f035f5179ba501ef87df8b0 Mon Sep 17 00:00:00 2001 From: giovtorres Date: Tue, 10 Feb 2015 14:56:16 -0500 Subject: [PATCH 147/236] Added pip example installing from local tarball --- packaging/language/pip.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/packaging/language/pip.py b/packaging/language/pip.py index 97576a5258b..d9ecc17af64 100644 --- a/packaging/language/pip.py +++ b/packaging/language/pip.py 
@@ -113,6 +113,9 @@ EXAMPLES = ''' # Install (MyApp) using one of the remote protocols (bzr+,hg+,git+,svn+). You do not have to supply '-e' option in extra_args. - pip: name='svn+http://myrepo/svn/MyApp#egg=MyApp' +# Install (MyApp) from local tarball +- pip: name='file:///path/to/MyApp.tar.gz' + # Install (Bottle) into the specified (virtualenv), inheriting none of the globally installed modules - pip: name=bottle virtualenv=/my_app/venv From b42b0f49b9600885ad046506dbe795892515dcda Mon Sep 17 00:00:00 2001 From: Paul Geraghty Date: Wed, 11 Feb 2015 02:18:16 +0000 Subject: [PATCH 148/236] Correct grammar of comment relating to example --- system/user.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/user.py b/system/user.py index 804ff5b9bd6..9746ccc6328 100644 --- a/system/user.py +++ b/system/user.py @@ -199,7 +199,7 @@ EXAMPLES = ''' # Create a 2048-bit SSH key for user jsmith in ~jsmith/.ssh/id_rsa - user: name=jsmith generate_ssh_key=yes ssh_key_bits=2048 ssh_key_file=.ssh/id_rsa -# added a consultant who's account you want to expire +# added a consultant whose account you want to expire - user: name=james18 shell=/bin/zsh groups=developers expires=1422403387 ''' From 374dd06c8ed8549d39041d9a424b301736582674 Mon Sep 17 00:00:00 2001 From: Tomer Paz Date: Wed, 11 Feb 2015 09:25:24 +0200 Subject: [PATCH 149/236] resolve issue 764 https://github.com/ansible/ansible-modules-core/issues/764 added 'domainname' attribute in addition to existing 'hostname'. That's the most elegant way to delegate domain name to docker-py... 
--- cloud/docker/docker.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 1957c2d4db0..639c08c0ed0 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -138,6 +138,12 @@ options: required: false default: null aliases: [] + domainname: + description: + - Set container domain name + required: false + default: null + aliases: [] env: description: - Set environment variables (e.g. env="PASSWORD=sEcRe7,WORKERS=4") @@ -724,6 +730,7 @@ class DockerManager(object): 'mem_limit': _human_to_bytes(self.module.params.get('memory_limit')), 'environment': self.env, 'hostname': self.module.params.get('hostname'), + 'domainname': self.module.params.get('domainname'), 'detach': self.module.params.get('detach'), 'name': self.module.params.get('name'), 'stdin_open': self.module.params.get('stdin_open'), @@ -853,6 +860,7 @@ def main(): email = dict(), registry = dict(), hostname = dict(default=None), + domainname = dict(default=None), env = dict(type='dict'), dns = dict(), detach = dict(default=True, type='bool'), From b84f566ee769cc275939babe5294484d0cf7c612 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 11 Feb 2015 14:05:27 -0800 Subject: [PATCH 150/236] Tabs to spaces --- system/authorized_key.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/system/authorized_key.py b/system/authorized_key.py index 06d16da5ee7..4ca4f30f32e 100644 --- a/system/authorized_key.py +++ b/system/authorized_key.py @@ -353,14 +353,14 @@ def enforce_state(module, params): # if the key is a url, request it and use it as key source if key.startswith("http"): - try: + try: resp, info = fetch_url(module, key) - if info['status'] != 200: + if info['status'] != 200: module.fail_json(msg=error_msg % key) - else: - key = resp.read() - except Exception: - module.fail_json(msg=error_msg % key) + else: + key = resp.read() + except Exception: + module.fail_json(msg=error_msg % key) # extract 
individual keys into an array, skipping blank lines and comments key = [s for s in key.splitlines() if s and not s.startswith('#')] From 55b06568b0d02965808071141f0126a9d0cd30ae Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 12 Feb 2015 09:31:45 -0500 Subject: [PATCH 151/236] added note that url src is only available from 1.9 on --- system/authorized_key.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/authorized_key.py b/system/authorized_key.py index 4ca4f30f32e..4d2af6a3bcc 100644 --- a/system/authorized_key.py +++ b/system/authorized_key.py @@ -37,7 +37,7 @@ options: aliases: [] key: description: - - The SSH public key(s), as a string or url (https://github.com/username.keys) + - The SSH public key(s), as a string or (since 1.9) url (https://github.com/username.keys) required: true default: null path: From 1f2ffb390007300486b2cb2de0a1dc66bd66354c Mon Sep 17 00:00:00 2001 From: Dagobert Michelsen Date: Thu, 12 Feb 2015 17:04:31 +0100 Subject: [PATCH 152/236] Solaris SMF is already supported and should be mentioned in the docs, --- system/service.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/service.py b/system/service.py index e9751c10be5..3aa2e9a0515 100644 --- a/system/service.py +++ b/system/service.py @@ -26,7 +26,7 @@ version_added: "0.1" short_description: Manage services. description: - Controls services on remote hosts. Supported init systems include BSD init, - OpenRC, SysV, Solaris SMF, systemd, upstart. + OpenRC, SysV, systemd, upstart. options: name: required: true From ea1122d49f84ab4f43637d3770bfe61dce7eb6f9 Mon Sep 17 00:00:00 2001 From: Robert Marsa Date: Thu, 12 Feb 2015 09:27:39 -0800 Subject: [PATCH 153/236] Added target_tags to gce_module. 
--- cloud/google/gce_net.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/cloud/google/gce_net.py b/cloud/google/gce_net.py index 10592d20033..bafe6d1d43a 100644 --- a/cloud/google/gce_net.py +++ b/cloud/google/gce_net.py @@ -66,6 +66,13 @@ options: required: false default: null aliases: [] + target_tags: + version_added: "1.9" + description: + - the target instance tags for creating a firewall rule + required: false + default: null + aliases: [] state: description: - desired state of the persistent disk @@ -158,6 +165,7 @@ def main(): name = dict(), src_range = dict(type='list'), src_tags = dict(type='list'), + target_tags = dict(type='list'), state = dict(default='present'), service_account_email = dict(), pem_file = dict(), @@ -173,6 +181,7 @@ def main(): name = module.params.get('name') src_range = module.params.get('src_range') src_tags = module.params.get('src_tags') + target_tags = module.params.get('target_tags') state = module.params.get('state') changed = False @@ -218,7 +227,7 @@ def main(): try: gce.ex_create_firewall(fwname, allowed_list, network=name, - source_ranges=src_range, source_tags=src_tags) + source_ranges=src_range, source_tags=src_tags, target_tags=target_tags) changed = True except ResourceExistsError: pass @@ -229,6 +238,7 @@ def main(): json_output['allowed'] = allowed json_output['src_range'] = src_range json_output['src_tags'] = src_tags + json_output['target_tags'] = target_tags if state in ['absent', 'deleted']: if fwname: From 584f210eead955b6b2bda1a5c755bd0e2d6e9680 Mon Sep 17 00:00:00 2001 From: Robert Marsa Date: Thu, 12 Feb 2015 14:24:03 -0800 Subject: [PATCH 154/236] Added the ability to set an instance to forward ip. 
--- cloud/google/gce.py | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/cloud/google/gce.py b/cloud/google/gce.py index d429b61de20..4105baa30f4 100644 --- a/cloud/google/gce.py +++ b/cloud/google/gce.py @@ -115,6 +115,13 @@ options: required: true default: "us-central1-a" aliases: [] + ip_forward: + version_added: "1.9" + description: + - set to true if the instance can forward ip packets (useful for gateways) + required: false + default: "false" + aliases: [] requirements: [ "libcloud" ] notes: @@ -235,7 +242,7 @@ def get_instance_info(inst): 'status': ('status' in inst.extra) and inst.extra['status'] or None, 'tags': ('tags' in inst.extra) and inst.extra['tags'] or [], 'zone': ('zone' in inst.extra) and inst.extra['zone'].name or None, - }) + }) def create_instances(module, gce, instance_names): """Creates new instances. Attributes other than instance_names are picked @@ -259,6 +266,7 @@ def create_instances(module, gce, instance_names): state = module.params.get('state') tags = module.params.get('tags') zone = module.params.get('zone') + ip_forward = module.params.get('ip_forward') new_instances = [] changed = False @@ -319,7 +327,7 @@ def create_instances(module, gce, instance_names): try: inst = gce.create_node(name, lc_machine_type, lc_image, location=lc_zone, ex_network=network, ex_tags=tags, - ex_metadata=metadata, ex_boot_disk=pd) + ex_metadata=metadata, ex_boot_disk=pd, ex_can_ip_forward=ip_forward) changed = True except ResourceExistsError: inst = gce.ex_get_node(name, lc_zone) @@ -409,6 +417,7 @@ def main(): service_account_email = dict(), pem_file = dict(), project_id = dict(), + ip_forward = dict(type='bool', default=False), ) ) @@ -424,6 +433,7 @@ def main(): state = module.params.get('state') tags = module.params.get('tags') zone = module.params.get('zone') + ip_forward = module.params.get('ip_forward') changed = False inames = [] From 18792f4e48e90e9a260a24005996860722f482b7 Mon Sep 17 00:00:00 2001 From: Brian 
Coca Date: Thu, 12 Feb 2015 23:57:39 -0500 Subject: [PATCH 155/236] added note to add_hosts about loop bypass --- inventory/add_host.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/inventory/add_host.py b/inventory/add_host.py index 4fd4e1eb15f..0f1b84abcd2 100644 --- a/inventory/add_host.py +++ b/inventory/add_host.py @@ -5,7 +5,7 @@ DOCUMENTATION = ''' module: add_host short_description: add a host (and alternatively a group) to the ansible-playbook in-memory inventory description: - - Use variables to create new hosts and groups in inventory for use in later plays of the same playbook. + - Use variables to create new hosts and groups in inventory for use in later plays of the same playbook. Takes variables so you can define the new hosts more fully. version_added: "0.9" options: @@ -13,12 +13,15 @@ options: aliases: [ 'hostname', 'host' ] description: - The hostname/ip of the host to add to the inventory, can include a colon and a port number. - required: true + required: true groups: aliases: [ 'groupname', 'group' ] description: - The groups to add the hostname to, comma separated. required: false +notes: + - This module bypasses the play host loop and only runs once for all the hosts in the play, if you need it + to iterate use a with_ directive. author: Seth Vidal ''' From 1555cfeea236863572c8f9050804866eeddad9a7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Gross?= Date: Thu, 28 Aug 2014 22:47:11 +0200 Subject: [PATCH 156/236] Add basic support for OS X (Darwin) user management. --- system/user.py | 317 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 317 insertions(+) mode change 100644 => 100755 system/user.py diff --git a/system/user.py b/system/user.py old mode 100644 new mode 100755 index 9746ccc6328..0939faf313a --- a/system/user.py +++ b/system/user.py @@ -81,6 +81,8 @@ options: the user example in the github examples directory for what this looks like in a playbook. 
The `FAQ `_ contains details on various ways to generate these password values. + Note on Darwin system, this value has to be cleartext. + Beware of security issues. state: required: false default: "present" @@ -1344,6 +1346,321 @@ class SunOS(User): return (rc, out, err) +# =========================================== +class DarwinUser(User): + """ + This is a Darwin Mac OS X User manipulation class. + Main differences are that Darwin:- + - Handles accounts in a database managed by dscl(1) + - Has no useradd/groupadd + - Does not create home directories + - User password must be cleartext + - UID must be given + - System users must ben under 500 + + This overrides the following methods from the generic class:- + - user_exists() + - create_user() + - remove_user() + - modify_user() + """ + platform = 'Darwin' + distribution = None + SHADOWFILE = None + + dscl_directory = '.' + + fields = [ + ('comment', 'RealName'), + ('home', 'NFSHomeDirectory'), + ('shell', 'UserShell'), + ('uid', 'UniqueID'), + ('group', 'PrimaryGroupID'), + ] + + def _get_dscl(self): + return [ self.module.get_bin_path('dscl', True), self.dscl_directory ] + + def _list_user_groups(self): + cmd = self._get_dscl() + cmd += [ '-search', '/Groups', 'GroupMembership', self.name ] + (rc, out, err) = self.execute_command(cmd) + groups = [] + for line in out.splitlines(): + if line.startswith(' ') or line.startswith(')'): + continue + groups.append(line.split()[0]) + return groups + + def _get_user_property(self, property): + '''Return user PROPERTY as given my dscl(1) read or None if not found.''' + cmd = self._get_dscl() + cmd += [ '-read', '/Users/%s' % self.name, property ] + (rc, out, err) = self.execute_command(cmd) + if rc != 0: + return None + # from dscl(1) + # if property contains embedded spaces, the list will instead be + # displayed one entry per line, starting on the line after the key. 
+ lines = out.splitlines() + #sys.stderr.write('*** |%s| %s -> %s\n' % (property, out, lines)) + if len(lines) == 1: + return lines[0].split(': ')[1] + else: + if len(lines) > 2: + return '\n'.join([ lines[1].strip() ] + lines[2:]) + else: + if len(lines) == 2: + return lines[1].strip() + else: + return None + + def _change_user_password(self): + '''Change password for SELF.NAME against SELF.PASSWORD. + + Please note that password must be cleatext. + ''' + # some documentation on how is stored passwords on OSX: + # http://blog.lostpassword.com/2012/07/cracking-mac-os-x-lion-accounts-passwords/ + # http://null-byte.wonderhowto.com/how-to/hack-mac-os-x-lion-passwords-0130036/ + # http://pastebin.com/RYqxi7Ca + # on OSX 10.8+ hash is SALTED-SHA512-PBKDF2 + # https://pythonhosted.org/passlib/lib/passlib.hash.pbkdf2_digest.html + # https://gist.github.com/nueh/8252572 + cmd = self._get_dscl() + if self.password: + cmd += [ '-passwd', '/Users/%s' % self.name, self.password] + else: + cmd += [ '-create', '/Users/%s' % self.name, 'Password', '*'] + (rc, out, err) = self.execute_command(cmd) + if rc != 0: + self.module.fail_json(msg='Error when changing password', + err=err, out=out, rc=rc) + return (rc, out, err) + + def _make_group_numerical(self): + '''Convert SELF.GROUP to is stringed numerical value suitable for dscl.''' + if self.group is not None: + try: + self.group = grp.getgrnam(self.group).gr_gid + except KeyError: + self.module.fail_json(msg='Group "%s" not found. Try to create it first using "group" module.' % self.group) + # We need to pass a string to dscl + self.group = str(self.group) + + def __modify_group(self, group, action): + '''Add or remove SELF.NAME to or from GROUP depending on ACTION. + ACTION can be 'add' or 'remove' otherwhise 'remove' is assumed. 
''' + if action == 'add': + option = '-a' + else: + option = '-d' + cmd = [ 'dseditgroup', '-o', 'edit', option, self.name, + '-t', 'user', group ] + (rc, out, err) = self.execute_command(cmd) + if rc != 0: + self.module.fail_json(msg='Cannot %s user "%s" to group "%s".' + % (action, self.name, group), + err=err, out=out, rc=rc) + return (rc, out, err) + + def _modify_group(self): + '''Add or remove SELF.NAME to or from GROUP depending on ACTION. + ACTION can be 'add' or 'remove' otherwhise 'remove' is assumed. ''' + + rc = 0 + out = '' + err = '' + changed = False + + current = set(self._list_user_groups()) + if self.groups is not None: + target = set(self.groups.split(',')) + else: + target = set([]) + + for remove in current - target: + (_rc, _err, _out) = self.__modify_group(remove, 'delete') + rc += rc + out += _out + err += _err + changed = True + + for add in target - current: + (_rc, _err, _out) = self.__modify_group(add, 'add') + rc += _rc + out += _out + err += _err + changed = True + + return (rc, err, out, changed) + + def _update_system_user(self): + '''Hide or show user on login window according SELF.SYSTEM. + + Returns 0 if a change has been made, None otherwhise.''' + + plist_file = '/Library/Preferences/com.apple.loginwindow.plist' + + # http://support.apple.com/kb/HT5017?viewlocale=en_US + uid = int(self.uid) + cmd = [ 'defaults', 'read', plist_file, 'HiddenUsersList' ] + (rc, out, err) = self.execute_command(cmd) + # returned value is + # ( + # "_userA", + # "_UserB", + # userc + # ) + hidden_users = [] + for x in out.splitlines()[1:-1]: + try: + x = x.split('"')[1] + except IndexError: + x = x.strip() + hidden_users.append(x) + + if self.system: + if not self.name in hidden_users: + cmd = [ 'defaults', 'write', plist_file, + 'HiddenUsersList', '-array-add', self.name ] + (rc, out, err) = self.execute_command(cmd) + if rc != 0: + self.module.fail_json( + msg='Cannot user "%s" to hidden user list.' 
+ % self.name, err=err, out=out, rc=rc) + return 0 + else: + if self.name in hidden_users: + del(hidden_users[hidden_users.index(self.name)]) + + cmd = [ 'defaults', 'write', plist_file, + 'HiddenUsersList', '-array' ] + hidden_users + (rc, out, err) = self.execute_command(cmd) + if rc != 0: + self.module.fail_json( + msg='Cannot remove user "%s" from hidden user list.' + % self.name, err=err, out=out, rc=rc) + return 0 + + def user_exists(self): + '''Check is SELF.NAME is a known user on the system.''' + cmd = self._get_dscl() + cmd += [ '-list', '/Users/%s' % self.name] + (rc, out, err) = self.execute_command(cmd) + return rc == 0 + + def remove_user(self): + '''Delete SELF.NAME. If SELF.FORCE is true, remove its home directory.''' + info = self.user_info() + + cmd = self._get_dscl() + cmd += [ '-delete', '/Users/%s' % self.name] + (rc, out, err) = self.execute_command(cmd) + + if rc != 0: + self.module.fail_json( + msg='Cannot delete user "%s".' + % self.name, err=err, out=out, rc=rc) + + if self.force: + if os.path.exists(info[5]): + shutil.rmtree(info[5]) + out += "Removed %s" % info[5] + + return (rc, out, err) + + def create_user(self, command_name='dscl'): + cmd = self._get_dscl() + cmd += [ '-create', '/Users/%s' % self.name] + (rc, err, out) = self.execute_command(cmd) + if rc != 0: + self.module.fail_json( + msg='Cannot create user "%s".' 
+ % self.name, err=err, out=out, rc=rc) + + + self._make_group_numerical() + + # Homedir is not created by default + if self.createhome: + if self.home is None: + self.home = '/Users/%s' % self.name + if not os.path.exists(self.home): + os.makedirs(self.home) + self.chown_homedir(int(self.uid), int(self.group), self.home) + + for field in self.fields: + if self.__dict__.has_key(field[0]) and self.__dict__[field[0]]: + + cmd = self._get_dscl() + cmd += [ '-create', '/Users/%s' % self.name, + field[1], self.__dict__[field[0]]] + (rc, _err, _out) = self.execute_command(cmd) + if rc != 0: + self.module.fail_json( + msg='Cannot add property "%s" to user "%s".' + % (field[0], self.name), err=err, out=out, rc=rc) + + out += _out + err += _err + if rc != 0: + return (rc, _err, _out) + + + (rc, _err, _out) = self._change_user_password() + out += _out + err += _err + + self._update_system_user() + # here we don't care about change status since it is a creation, + # thus changed is always true. + (rc, _out, _err, changed) = self._modify_group() + out += _out + err += _err + return (rc, err, out) + + def modify_user(self): + changed = None + out = '' + err = '' + + self._make_group_numerical() + + for field in self.fields: + if self.__dict__.has_key(field[0]) and self.__dict__[field[0]]: + current = self._get_user_property(field[1]) + if current is None or current != self.__dict__[field[0]]: + cmd = self._get_dscl() + cmd += [ '-create', '/Users/%s' % self.name, + field[1], self.__dict__[field[0]]] + (rc, _err, _out) = self.execute_command(cmd) + if rc != 0: + self.module.fail_json( + msg='Cannot update property "%s" for user "%s".' 
+ % (field[0], self.name), err=err, out=out, rc=rc) + changed = rc + out += _out + err += _err + if self.update_password == 'always': + (rc, _err, _out) = self._change_user_password() + out += _out + err += _err + changed = rc + + (rc, _out, _err, _changed) = self._modify_group() + out += _out + err += _err + + if _changed is True: + changed = rc + + rc = self._update_system_user() + if rc == 0: + changed = rc + + return (changed, out, err) + # =========================================== class AIX(User): From 67ce4cf416b6b1f21c0d28523e512851a28c1733 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Gross?= Date: Thu, 28 Aug 2014 23:24:32 +0200 Subject: [PATCH 157/236] Add basic support for OSX groups. --- system/group.py | 43 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) mode change 100644 => 100755 system/group.py diff --git a/system/group.py b/system/group.py old mode 100644 new mode 100755 index 617de7c2857..83ea410b0b1 --- a/system/group.py +++ b/system/group.py @@ -251,6 +251,49 @@ class FreeBsdGroup(Group): # =========================================== + + +class DarwinGroup(Group): + """ + This is a Mac OS X Darwin Group manipulation class. + + This overrides the following methods from the generic class:- + - group_del() + - group_add() + - group_mod() + + group manupulation are done using dseditgroup(1). 
+ """ + + platform = 'Darwin' + distribution = None + + def group_add(self, **kwargs): + cmd = [self.module.get_bin_path('dseditgroup', True)] + cmd += [ '-o', 'create' ] + cmd += [ '-i', self.gid ] + cmd += [ '-L', self.name ] + (rc, out, err) = self.execute_command(cmd) + return (rc, out, err) + + def group_del(self): + cmd = [self.module.get_bin_path('dseditgroup', True)] + cmd += [ '-o', 'delete' ] + cmd += [ '-L', self.name ] + (rc, out, err) = self.execute_command(cmd) + return (rc, out, err) + + def group_mod(self): + info = self.group_info() + if self.gid is not None and int(self.gid) != info[2]: + cmd = [self.module.get_bin_path('dseditgroup', True)] + cmd += [ '-o', 'edit' ] + cmd += [ '-i', self.gid ] + cmd += [ '-L', self.name ] + (rc, out, err) = self.execute_command(cmd) + return (rc, out, err) + return (None, '', '') + class OpenBsdGroup(Group): """ This is a OpenBSD Group manipulation class. From 3100236628ff47729ff0465272da21fb18f08288 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 13 Feb 2015 10:02:05 -0500 Subject: [PATCH 158/236] added missing docs on connection_timeout for wait_for --- utilities/logic/wait_for.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/utilities/logic/wait_for.py b/utilities/logic/wait_for.py index ae316fe1a17..00cc03fea56 100644 --- a/utilities/logic/wait_for.py +++ b/utilities/logic/wait_for.py @@ -63,6 +63,11 @@ options: - maximum number of seconds to wait for required: false default: 300 + connect_timeout: + description: + - maximum number of seconds to wait for a connection to happen before closing and retrying + required: false + default: 5 delay: description: - number of seconds to wait before starting to poll From 611e8b59d6cf99de90272ff216088f42eb5d5c13 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 13 Feb 2015 11:06:06 -0500 Subject: [PATCH 159/236] now module fails gracefully instead of stacktrace when trying to install missing deb file --- packaging/os/apt.py | 5 ++++- 1 file changed, 4 
insertions(+), 1 deletion(-) diff --git a/packaging/os/apt.py b/packaging/os/apt.py index 06f96f3f1d5..7cfb3d7f0e8 100644 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -363,7 +363,10 @@ def install_deb(m, debs, cache, force, install_recommends, dpkg_options): deps_to_install = [] pkgs_to_install = [] for deb_file in debs.split(','): - pkg = apt.debfile.DebPackage(deb_file) + try: + pkg = apt.debfile.DebPackage(deb_file) + except SystemError, e: + m.fail_json(msg="Error: %s\nSystem Error: %s" % (pkg._failure_string,str(e))) # Check if it's already installed if pkg.compare_to_version_in_cache() == pkg.VERSION_SAME: From d03e9540a69daaed3579e8218c32b8bed57ff462 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Mon, 12 Jan 2015 16:56:31 -0600 Subject: [PATCH 160/236] Support accounts with more than 100 load balancers --- cloud/rackspace/rax_clb.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/cloud/rackspace/rax_clb.py b/cloud/rackspace/rax_clb.py index 38baa77b6ff..a3deae6f4a7 100644 --- a/cloud/rackspace/rax_clb.py +++ b/cloud/rackspace/rax_clb.py @@ -152,7 +152,14 @@ def cloud_load_balancer(module, state, name, meta, algorithm, port, protocol, 'typically indicates an invalid region or an ' 'incorrectly capitalized region name.') - for balancer in clb.list(): + balancer_list = clb.list() + while balancer_list: + retrieved = clb.list(marker=balancer_list.pop().id) + balancer_list.extend(retrieved) + if len(retrieved) < 2: + break + + for balancer in balancer_list: if name != balancer.name and name != balancer.id: continue From 1d92dd31a6c88746b17b220cbd5d886c6f085aa1 Mon Sep 17 00:00:00 2001 From: David Hummel Date: Sat, 14 Feb 2015 17:16:35 -0500 Subject: [PATCH 161/236] Fix issue #793: mysql_db: for state={absent,present} connections to database mysql fail for users other than root --- database/mysql/mysql_db.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/database/mysql/mysql_db.py b/database/mysql/mysql_db.py 
index a62243206ec..7541683401e 100644 --- a/database/mysql/mysql_db.py +++ b/database/mysql/mysql_db.py @@ -312,7 +312,7 @@ def main(): module.fail_json(msg="with state=%s target is required" % (state)) connect_to_db = db else: - connect_to_db = 'mysql' + connect_to_db = '' try: if socket: try: From 8eb7870a5e9a929f5207436645d742f6f3da1b8b Mon Sep 17 00:00:00 2001 From: mcameron Date: Mon, 16 Feb 2015 14:58:53 +0000 Subject: [PATCH 162/236] Useful log output. --- cloud/openstack/nova_compute.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/openstack/nova_compute.py b/cloud/openstack/nova_compute.py index b51a1891a7d..1005023465a 100644 --- a/cloud/openstack/nova_compute.py +++ b/cloud/openstack/nova_compute.py @@ -324,7 +324,7 @@ def _add_floating_ip_from_pool(module, nova, server): try: new_ip = nova.floating_ips.create(pool) except Exception, e: - module.fail_json(msg = "Unable to create floating ip") + module.fail_json(msg = "Unable to create floating ip: %s" % (e.message)) pool_ips.append(new_ip.ip) # Add to the main list usable_floating_ips[pool] = pool_ips From 24f8792babd9abd7512e88cdaca5ec13e1114d1f Mon Sep 17 00:00:00 2001 From: mcameron Date: Mon, 16 Feb 2015 15:08:39 +0000 Subject: [PATCH 163/236] More general output. 
--- cloud/openstack/nova_compute.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/openstack/nova_compute.py b/cloud/openstack/nova_compute.py index 1005023465a..a9bd05b1e3f 100644 --- a/cloud/openstack/nova_compute.py +++ b/cloud/openstack/nova_compute.py @@ -356,7 +356,7 @@ def _add_auto_floating_ip(module, nova, server): try: new_ip = nova.floating_ips.create() except Exception as e: - module.fail_json(msg = "Unable to create floating ip: %s" % (e.message)) + module.fail_json(msg = "Unable to create floating ip: %s" % (e)) try: server.add_floating_ip(new_ip) From aa6c73f11b76e710fd436efa5055d8b506031312 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 16 Feb 2015 07:06:11 -0800 Subject: [PATCH 164/236] When recursing subdirectories, honor the follow parameter for setting file attributes. One half of the fix for https://github.com/ansible/ansible-modules-core/issues/778 The other half is in basic.py --- files/file.py | 33 +++++++++++++++++++++++++-------- 1 file changed, 25 insertions(+), 8 deletions(-) diff --git a/files/file.py b/files/file.py index 35bb52ab1e3..11c2cf2a585 100644 --- a/files/file.py +++ b/files/file.py @@ -120,6 +120,28 @@ def get_state(path): return 'absent' +def recursive_set_attributes(module, path, follow, file_args): + changed = False + for root, dirs, files in os.walk(path): + for fsobj in dirs + files: + fsname = os.path.join(root, fsobj) + if not os.path.islink(fsname): + tmp_file_args = file_args.copy() + tmp_file_args['path']=fsname + changed |= module.set_fs_attributes_if_different(tmp_file_args, changed) + else: + tmp_file_args = file_args.copy() + tmp_file_args['path']=fsname + changed |= module.set_fs_attributes_if_different(tmp_file_args, changed) + if follow: + fsname = os.path.join(root, os.readlink(fsname)) + if os.path.isdir(fsname): + changed |= recursive_set_attributes(module, fsname, follow, file_args) + tmp_file_args = file_args.copy() + tmp_file_args['path']=fsname + changed |= 
module.set_fs_attributes_if_different(tmp_file_args, changed) + return changed + def main(): module = AnsibleModule( @@ -234,7 +256,6 @@ def main(): module.exit_json(path=path, changed=changed) elif state == 'directory': - if follow and prev_state == 'link': path = os.readlink(path) prev_state = get_state(path) @@ -266,12 +287,7 @@ def main(): changed = module.set_fs_attributes_if_different(file_args, changed) if recurse: - for root,dirs,files in os.walk( file_args['path'] ): - for fsobj in dirs + files: - fsname=os.path.join(root, fsobj) - tmp_file_args = file_args.copy() - tmp_file_args['path']=fsname - changed = module.set_fs_attributes_if_different(tmp_file_args, changed) + changed |= recursive_set_attributes(module, file_args['path'], follow, file_args) module.exit_json(path=path, changed=changed) @@ -379,5 +395,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() From 98f869f34522c738f7351c3cc46d8fa75ddec33d Mon Sep 17 00:00:00 2001 From: Rahul Mehrotra Date: Sat, 14 Feb 2015 23:56:16 -0800 Subject: [PATCH 165/236] Fixed Documentation issue concerning file mode when specified using numerical value --- files/file.py | 1 + 1 file changed, 1 insertion(+) diff --git a/files/file.py b/files/file.py index 35bb52ab1e3..9b17ae40d2c 100644 --- a/files/file.py +++ b/files/file.py @@ -88,6 +88,7 @@ options: ''' EXAMPLES = ''' +# change file ownership, group and mode. When specifying mode using octal numbers, first digit should always be 0.
- file: path=/etc/foo.conf owner=foo group=foo mode=0644 - file: src=/file/to/link/to dest=/path/to/symlink owner=foo group=foo state=link - file: src=/tmp/{{ item.path }} dest={{ item.dest }} state=link From b4ce4f37220f0cdf480c8e8aa637fdf3829459ab Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Mon, 16 Feb 2015 15:33:14 -0600 Subject: [PATCH 166/236] Don't hardcode a minimum size of a volume --- cloud/rackspace/rax_cbs.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/cloud/rackspace/rax_cbs.py b/cloud/rackspace/rax_cbs.py index 261168889cc..43488fa7cc6 100644 --- a/cloud/rackspace/rax_cbs.py +++ b/cloud/rackspace/rax_cbs.py @@ -108,9 +108,6 @@ except ImportError: def cloud_block_storage(module, state, name, description, meta, size, snapshot_id, volume_type, wait, wait_timeout): - if size < 100: - module.fail_json(msg='"size" must be greater than or equal to 100') - changed = False volume = None instance = {} From 9e0959d600e5414b53eeb32ee1eba1a46d43daf6 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 16 Feb 2015 15:34:22 -0800 Subject: [PATCH 167/236] Fix for recursion traceback in copy with relative paths This is a further fix for: https://github.com/ansible/ansible/issues/9092 when the relative path contains a subdirectory. 
Like: ansible localhost -m copy -a 'src=/etc/group dest=foo/bar/' --- files/copy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/copy.py b/files/copy.py index c5aaa01b5b3..364996f5293 100644 --- a/files/copy.py +++ b/files/copy.py @@ -181,7 +181,7 @@ def main(): if original_basename and dest.endswith("/"): dest = os.path.join(dest, original_basename) dirname = os.path.dirname(dest) - if not os.path.exists(dirname) and '/' in dirname: + if not os.path.exists(dirname) and os.path.isabs(dirname): (pre_existing_dir, new_directory_list) = split_pre_existing_dir(dirname) os.makedirs(dirname) directory_args = module.load_file_common_arguments(module.params) From b0dcff214a71473c907dc14eaa5f5b413a98f9a7 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Fri, 28 Nov 2014 12:22:12 -0600 Subject: [PATCH 168/236] Add boot from volume functionality to rax and rax_cbs modules --- cloud/rackspace/rax.py | 174 +++++++++++++++++++++++++++++++++---- cloud/rackspace/rax_cbs.py | 33 +++++-- 2 files changed, 184 insertions(+), 23 deletions(-) diff --git a/cloud/rackspace/rax.py b/cloud/rackspace/rax.py index 5fa1b57386a..4ec49e9736d 100644 --- a/cloud/rackspace/rax.py +++ b/cloud/rackspace/rax.py @@ -35,6 +35,34 @@ options: - "yes" - "no" version_added: 1.5 + boot_from_volume: + description: + - Whether or not to boot the instance from a Cloud Block Storage volume. + If C(yes) and I(image) is specified a new volume will be created at + boot time. I(boot_volume_size) is required with I(image) to create a + new volume at boot time. + default: "no" + choices: + - "yes" + - "no" + version_added: 1.9 + boot_volume: + description: + - Cloud Block Storage ID or Name to use as the boot volume of the + instance + version_added: 1.9 + boot_volume_size: + description: + - Size of the volume to create in Gigabytes. This is only required with + I(image) and I(boot_from_volume). 
+ default: 100 + version_added: 1.9 + boot_volume_terminate: + description: + - Whether the I(boot_volume) or newly created volume from I(image) will + be terminated when the server is terminated + default: false + version_added: 1.9 config_drive: description: - Attach read-only configuration drive to server as label config-2 @@ -99,7 +127,9 @@ options: version_added: 1.4 image: description: - - image to use for the instance. Can be an C(id), C(human_id) or C(name) + - image to use for the instance. Can be an C(id), C(human_id) or C(name). + With I(boot_from_volume), a Cloud Block Storage volume will be created + with this image default: null instance_ids: description: @@ -213,7 +243,7 @@ except ImportError: def create(module, names=[], flavor=None, image=None, meta={}, key_name=None, files={}, wait=True, wait_timeout=300, disk_config=None, group=None, nics=[], extra_create_args={}, user_data=None, - config_drive=False, existing=[]): + config_drive=False, existing=[], block_device_mapping_v2=[]): cs = pyrax.cloudservers changed = False @@ -239,6 +269,7 @@ def create(module, names=[], flavor=None, image=None, meta={}, key_name=None, module.fail_json(msg='Failed to load %s' % lpath) try: servers = [] + bdmv2 = block_device_mapping_v2 for name in names: servers.append(cs.servers.create(name=name, image=image, flavor=flavor, meta=meta, @@ -247,6 +278,7 @@ def create(module, names=[], flavor=None, image=None, meta={}, key_name=None, disk_config=disk_config, config_drive=config_drive, userdata=user_data, + block_device_mapping_v2=bdmv2, **extra_create_args)) except Exception, e: module.fail_json(msg='%s' % e.message) @@ -394,7 +426,9 @@ def cloudservers(module, state=None, name=None, flavor=None, image=None, disk_config=None, count=1, group=None, instance_ids=[], exact_count=False, networks=[], count_offset=0, auto_increment=False, extra_create_args={}, user_data=None, - config_drive=False): + config_drive=False, boot_from_volume=False, + boot_volume=None, 
boot_volume_size=None, + boot_volume_terminate=False): cs = pyrax.cloudservers cnw = pyrax.cloud_networks if not cnw: @@ -402,6 +436,26 @@ def cloudservers(module, state=None, name=None, flavor=None, image=None, 'typically indicates an invalid region or an ' 'incorrectly capitalized region name.') + if state == 'present' or (state == 'absent' and instance_ids is None): + for arg, value in dict(name=name, flavor=flavor).iteritems(): + if not value: + module.fail_json(msg='%s is required for the "rax" module' % + arg) + + if not boot_from_volume and not boot_volume and not image: + module.fail_json(msg='image is required for the "rax" module') + + if boot_from_volume and not image and not boot_volume: + module.fail_json(msg='image or boot_volume are required for the ' + '"rax" with boot_from_volume') + + if boot_from_volume and image and not boot_volume_size: + module.fail_json(msg='boot_volume_size is required for the "rax" ' + 'module with boot_from_volume and image') + + if boot_from_volume and image and boot_volume: + image = None + servers = [] # Add the group meta key @@ -438,12 +492,6 @@ def cloudservers(module, state=None, name=None, flavor=None, image=None, # act on the state if state == 'present': - for arg, value in dict(name=name, flavor=flavor, - image=image).iteritems(): - if not value: - module.fail_json(msg='%s is required for the "rax" module' % - arg) - # Idempotent ensurance of a specific count of servers if exact_count is not False: # See if we can find servers that match our options @@ -583,7 +631,6 @@ def cloudservers(module, state=None, name=None, flavor=None, image=None, # Perform more simplistic matching search_opts = { 'name': '^%s$' % name, - 'image': image, 'flavor': flavor } servers = [] @@ -591,6 +638,36 @@ def cloudservers(module, state=None, name=None, flavor=None, image=None, # Ignore DELETED servers if server.status == 'DELETED': continue + + if not image and boot_volume: + vol = rax_find_bootable_volume(module, pyrax, server, + 
exit=False) + if not vol: + continue + volume_image_metadata = vol.volume_image_metadata + vol_image_id = volume_image_metadata.get('image_id') + if vol_image_id: + server_image = rax_find_image(module, pyrax, + vol_image_id, exit=False) + if server_image: + server.image = dict(id=server_image) + + # Match image IDs taking care of boot from volume + if image and not server.image: + vol = rax_find_bootable_volume(module, pyrax, server) + volume_image_metadata = vol.volume_image_metadata + vol_image_id = volume_image_metadata.get('image_id') + if not vol_image_id: + continue + server_image = rax_find_image(module, pyrax, + vol_image_id, exit=False) + if image != server_image: + continue + + server.image = dict(id=server_image) + elif image and server.image['id'] != image: + continue + # Ignore servers with non matching metadata if server.metadata != meta: continue @@ -616,34 +693,85 @@ def cloudservers(module, state=None, name=None, flavor=None, image=None, # them, we aren't performing auto_increment here names = [name] * (count - len(servers)) + block_device_mapping_v2 = [] + if boot_from_volume: + mapping = { + 'boot_index': '0', + 'delete_on_termination': boot_volume_terminate, + 'destination_type': 'volume', + } + if image: + if boot_volume_size < 100: + module.fail_json(msg='"boot_volume_size" must be greater ' + 'than or equal to 100') + mapping.update({ + 'uuid': image, + 'source_type': 'image', + 'volume_size': boot_volume_size, + }) + image = None + elif boot_volume: + volume = rax_find_volume(module, pyrax, boot_volume) + mapping.update({ + 'uuid': pyrax.utils.get_id(volume), + 'source_type': 'volume', + }) + block_device_mapping_v2.append(mapping) + create(module, names=names, flavor=flavor, image=image, meta=meta, key_name=key_name, files=files, wait=wait, wait_timeout=wait_timeout, disk_config=disk_config, group=group, nics=nics, extra_create_args=extra_create_args, user_data=user_data, config_drive=config_drive, - existing=servers) + existing=servers, + 
block_device_mapping_v2=block_device_mapping_v2) elif state == 'absent': if instance_ids is None: # We weren't given an explicit list of server IDs to delete # Let's match instead - for arg, value in dict(name=name, flavor=flavor, - image=image).iteritems(): - if not value: - module.fail_json(msg='%s is required for the "rax" ' - 'module' % arg) search_opts = { 'name': '^%s$' % name, - 'image': image, 'flavor': flavor } for server in cs.servers.list(search_opts=search_opts): # Ignore DELETED servers if server.status == 'DELETED': continue + + if not image and boot_volume: + vol = rax_find_bootable_volume(module, pyrax, server, + exit=False) + if not vol: + continue + volume_image_metadata = vol.volume_image_metadata + vol_image_id = volume_image_metadata.get('image_id') + if vol_image_id: + server_image = rax_find_image(module, pyrax, + vol_image_id, exit=False) + if server_image: + server.image = dict(id=server_image) + + # Match image IDs taking care of boot from volume + if image and not server.image: + vol = rax_find_bootable_volume(module, pyrax, server) + volume_image_metadata = vol.volume_image_metadata + vol_image_id = volume_image_metadata.get('image_id') + if not vol_image_id: + continue + server_image = rax_find_image(module, pyrax, + vol_image_id, exit=False) + if image != server_image: + continue + + server.image = dict(id=server_image) + elif image and server.image['id'] != image: + continue + # Ignore servers with non matching metadata if meta != server.metadata: continue + servers.append(server) # Build a list of server IDs to delete @@ -672,6 +800,10 @@ def main(): argument_spec.update( dict( auto_increment=dict(default=True, type='bool'), + boot_from_volume=dict(default=False, type='bool'), + boot_volume=dict(type='str'), + boot_volume_size=dict(type='int', default=100), + boot_volume_terminate=dict(type='bool', default=False), config_drive=dict(default=False, type='bool'), count=dict(default=1, type='int'), count_offset=dict(default=1, 
type='int'), @@ -712,6 +844,10 @@ def main(): 'playbook pertaining to the "rax" module') auto_increment = module.params.get('auto_increment') + boot_from_volume = module.params.get('boot_from_volume') + boot_volume = module.params.get('boot_volume') + boot_volume_size = module.params.get('boot_volume_size') + boot_volume_terminate = module.params.get('boot_volume_terminate') config_drive = module.params.get('config_drive') count = module.params.get('count') count_offset = module.params.get('count_offset') @@ -757,7 +893,9 @@ def main(): exact_count=exact_count, networks=networks, count_offset=count_offset, auto_increment=auto_increment, extra_create_args=extra_create_args, user_data=user_data, - config_drive=config_drive) + config_drive=config_drive, boot_from_volume=boot_from_volume, + boot_volume=boot_volume, boot_volume_size=boot_volume_size, + boot_volume_terminate=boot_volume_terminate) # import module snippets diff --git a/cloud/rackspace/rax_cbs.py b/cloud/rackspace/rax_cbs.py index 43488fa7cc6..6f922f0128e 100644 --- a/cloud/rackspace/rax_cbs.py +++ b/cloud/rackspace/rax_cbs.py @@ -28,6 +28,12 @@ options: description: - Description to give the volume being created default: null + image: + description: + - image to use for bootable volumes. Can be an C(id), C(human_id) or + C(name). 
This option requires C(pyrax>=1.9.3) + default: null + version_added: 1.9 meta: description: - A hash of metadata to associate with the volume @@ -99,6 +105,8 @@ EXAMPLES = ''' register: my_volume ''' +from distutils.version import LooseVersion + try: import pyrax HAS_PYRAX = True @@ -107,7 +115,8 @@ except ImportError: def cloud_block_storage(module, state, name, description, meta, size, - snapshot_id, volume_type, wait, wait_timeout): + snapshot_id, volume_type, wait, wait_timeout, + image): changed = False volume = None instance = {} @@ -119,15 +128,26 @@ def cloud_block_storage(module, state, name, description, meta, size, 'typically indicates an invalid region or an ' 'incorrectly capitalized region name.') + if image: + # pyrax<1.9.3 did not have support for specifying an image when + # creating a volume which is required for bootable volumes + if LooseVersion(pyrax.version.version) < LooseVersion('1.9.3'): + module.fail_json(msg='Creating a bootable volume requires ' + 'pyrax>=1.9.3') + image = rax_find_image(module, pyrax, image) + volume = rax_find_volume(module, pyrax, name) if state == 'present': if not volume: + kwargs = dict() + if image: + kwargs['image'] = image try: volume = cbs.create(name, size=size, volume_type=volume_type, description=description, metadata=meta, - snapshot_id=snapshot_id) + snapshot_id=snapshot_id, **kwargs) changed = True except Exception, e: module.fail_json(msg='%s' % e.message) @@ -168,7 +188,8 @@ def main(): argument_spec = rax_argument_spec() argument_spec.update( dict( - description=dict(), + description=dict(type='str'), + image=dict(type='str'), meta=dict(type='dict', default={}), name=dict(required=True), size=dict(type='int', default=100), @@ -189,6 +210,7 @@ def main(): module.fail_json(msg='pyrax is required for this module') description = module.params.get('description') + image = module.params.get('image') meta = module.params.get('meta') name = module.params.get('name') size = module.params.get('size') @@ -201,11 
+223,12 @@ def main(): setup_rax_module(module, pyrax) cloud_block_storage(module, state, name, description, meta, size, - snapshot_id, volume_type, wait, wait_timeout) + snapshot_id, volume_type, wait, wait_timeout, + image) # import module snippets from ansible.module_utils.basic import * from ansible.module_utils.rax import * -### invoke the module +# invoke the module main() From d0e82fa3af328ac9985c5605bc23e98164d8af64 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Thu, 18 Dec 2014 13:02:44 -0600 Subject: [PATCH 169/236] Fix order of logic to determine required args --- cloud/rackspace/rax.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cloud/rackspace/rax.py b/cloud/rackspace/rax.py index 4ec49e9736d..3f6f7f334bd 100644 --- a/cloud/rackspace/rax.py +++ b/cloud/rackspace/rax.py @@ -437,14 +437,14 @@ def cloudservers(module, state=None, name=None, flavor=None, image=None, 'incorrectly capitalized region name.') if state == 'present' or (state == 'absent' and instance_ids is None): + if not boot_from_volume and not boot_volume and not image: + module.fail_json(msg='image is required for the "rax" module') + for arg, value in dict(name=name, flavor=flavor).iteritems(): if not value: module.fail_json(msg='%s is required for the "rax" module' % arg) - if not boot_from_volume and not boot_volume and not image: - module.fail_json(msg='image is required for the "rax" module') - if boot_from_volume and not image and not boot_volume: module.fail_json(msg='image or boot_volume are required for the ' '"rax" with boot_from_volume') From f1667ba53b8342c397165043cf2380225e448a2c Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Mon, 16 Feb 2015 15:31:28 -0600 Subject: [PATCH 170/236] Don't hardcode minimum for boot_volume_size --- cloud/rackspace/rax.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/cloud/rackspace/rax.py b/cloud/rackspace/rax.py index 3f6f7f334bd..1515865713c 100644 --- a/cloud/rackspace/rax.py +++ b/cloud/rackspace/rax.py @@ 
-701,9 +701,6 @@ def cloudservers(module, state=None, name=None, flavor=None, image=None, 'destination_type': 'volume', } if image: - if boot_volume_size < 100: - module.fail_json(msg='"boot_volume_size" must be greater ' - 'than or equal to 100') mapping.update({ 'uuid': image, 'source_type': 'image', From 6c1f40fea7d250ddd3f82cba28137b77ab258b62 Mon Sep 17 00:00:00 2001 From: Gauvain Pocentek Date: Tue, 17 Feb 2015 09:06:26 +0100 Subject: [PATCH 171/236] fetch: fix the flat parameter description ansible-doc expects the value of the description field to be a list, otherwise the output is not correct. This patch updates the flat description to be a list. --- files/fetch.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/files/fetch.py b/files/fetch.py index fd631e6ebe6..04bebd0301c 100644 --- a/files/fetch.py +++ b/files/fetch.py @@ -45,10 +45,10 @@ options: flat: version_added: "1.2" description: - Allows you to override the default behavior of prepending hostname/path/to/file to - the destination. If dest ends with '/', it will use the basename of the source - file, similar to the copy module. Obviously this is only handy if the filenames - are unique. + - Allows you to override the default behavior of prepending + hostname/path/to/file to the destination. If dest ends with '/', it + will use the basename of the source file, similar to the copy module. + Obviously this is only handy if the filenames are unique. requirements: [] author: Michael DeHaan ''' From 410862d631180902abd18e74454bf723c6d8b0cb Mon Sep 17 00:00:00 2001 From: mrsheepuk Date: Tue, 17 Feb 2015 13:48:59 +0000 Subject: [PATCH 172/236] Doc change - make clearer usage for existing remote file Having read the doc for this module several times and completely missing that it can be used for existing remote archives, I propose this update to the wording to make clear from the top the two ways in which this module can be used. 
--- files/unarchive.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/files/unarchive.py b/files/unarchive.py index 8e9c90fedcc..7804d1bc02c 100644 --- a/files/unarchive.py +++ b/files/unarchive.py @@ -23,14 +23,14 @@ DOCUMENTATION = ''' --- module: unarchive version_added: 1.4 -short_description: Copies an archive to a remote location and unpack it +short_description: Unpacks an archive after (optionally) copying it from the local machine. extends_documentation_fragment: files description: - - The M(unarchive) module copies an archive file from the local machine to a remote and unpacks it. + - The M(unarchive) module unpacks an archive. By default, it will copy the source file from the local system to the target before unpacking - set copy=no to unpack an archive which already exists on the target.. options: src: description: - - Local path to archive file to copy to the remote server; can be absolute or relative. + - If copy=yes (default), local path to archive file to copy to the target server; can be absolute or relative. If copy=no, path on the target server to existing archive file to unpack. required: true default: null dest: @@ -40,7 +40,7 @@ options: default: null copy: description: - - "if true, the file is copied from the 'master' to the target machine, otherwise, the plugin will look for src archive at the target machine." + - "If true, the file is copied from local 'master' to the target machine, otherwise, the plugin will look for src archive at the target machine." 
required: false choices: [ "yes", "no" ] default: "yes" From 3dbf65f63c7e03798b79cce43a585258bb7f03f0 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Tue, 17 Feb 2015 10:45:29 -0600 Subject: [PATCH 173/236] DRY up duplicate code --- cloud/rackspace/rax.py | 91 +++++++++++++++++------------------------- 1 file changed, 37 insertions(+), 54 deletions(-) diff --git a/cloud/rackspace/rax.py b/cloud/rackspace/rax.py index 1515865713c..288d7307b82 100644 --- a/cloud/rackspace/rax.py +++ b/cloud/rackspace/rax.py @@ -240,6 +240,39 @@ except ImportError: HAS_PYRAX = False +def rax_find_server_image(module, server, image, boot_volume): + if not image and boot_volume: + vol = rax_find_bootable_volume(module, pyrax, server, + exit=False) + if not vol: + return None + volume_image_metadata = vol.volume_image_metadata + vol_image_id = volume_image_metadata.get('image_id') + if vol_image_id: + server_image = rax_find_image(module, pyrax, + vol_image_id, exit=False) + if server_image: + server.image = dict(id=server_image) + + # Match image IDs taking care of boot from volume + if image and not server.image: + vol = rax_find_bootable_volume(module, pyrax, server) + volume_image_metadata = vol.volume_image_metadata + vol_image_id = volume_image_metadata.get('image_id') + if not vol_image_id: + return None + server_image = rax_find_image(module, pyrax, + vol_image_id, exit=False) + if image != server_image: + return None + + server.image = dict(id=server_image) + elif image and server.image['id'] != image: + return None + + return server.image + + def create(module, names=[], flavor=None, image=None, meta={}, key_name=None, files={}, wait=True, wait_timeout=300, disk_config=None, group=None, nics=[], extra_create_args={}, user_data=None, @@ -639,33 +672,8 @@ def cloudservers(module, state=None, name=None, flavor=None, image=None, if server.status == 'DELETED': continue - if not image and boot_volume: - vol = rax_find_bootable_volume(module, pyrax, server, - exit=False) - if not vol: 
- continue - volume_image_metadata = vol.volume_image_metadata - vol_image_id = volume_image_metadata.get('image_id') - if vol_image_id: - server_image = rax_find_image(module, pyrax, - vol_image_id, exit=False) - if server_image: - server.image = dict(id=server_image) - - # Match image IDs taking care of boot from volume - if image and not server.image: - vol = rax_find_bootable_volume(module, pyrax, server) - volume_image_metadata = vol.volume_image_metadata - vol_image_id = volume_image_metadata.get('image_id') - if not vol_image_id: - continue - server_image = rax_find_image(module, pyrax, - vol_image_id, exit=False) - if image != server_image: - continue - - server.image = dict(id=server_image) - elif image and server.image['id'] != image: + if not rax_find_server_image(module, server, image, + boot_volume): continue # Ignore servers with non matching metadata @@ -736,33 +744,8 @@ def cloudservers(module, state=None, name=None, flavor=None, image=None, if server.status == 'DELETED': continue - if not image and boot_volume: - vol = rax_find_bootable_volume(module, pyrax, server, - exit=False) - if not vol: - continue - volume_image_metadata = vol.volume_image_metadata - vol_image_id = volume_image_metadata.get('image_id') - if vol_image_id: - server_image = rax_find_image(module, pyrax, - vol_image_id, exit=False) - if server_image: - server.image = dict(id=server_image) - - # Match image IDs taking care of boot from volume - if image and not server.image: - vol = rax_find_bootable_volume(module, pyrax, server) - volume_image_metadata = vol.volume_image_metadata - vol_image_id = volume_image_metadata.get('image_id') - if not vol_image_id: - continue - server_image = rax_find_image(module, pyrax, - vol_image_id, exit=False) - if image != server_image: - continue - - server.image = dict(id=server_image) - elif image and server.image['id'] != image: + if not rax_find_server_image(module, server, image, + boot_volume): continue # Ignore servers with non matching 
metadata From c2f731e5bdb18c285f9bb79044f3d6e521b64cbc Mon Sep 17 00:00:00 2001 From: Dan Rue Date: Tue, 17 Feb 2015 15:19:22 -0600 Subject: [PATCH 174/236] Do not mark "skipped" when changed is false When using the "creates" option with the uri module, set changed to False if the file already exists. This behavior is consistent with other modules which use "creates", such as command and shell. --- network/basics/uri.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/network/basics/uri.py b/network/basics/uri.py index aac724a8f13..9be0a06cdce 100644 --- a/network/basics/uri.py +++ b/network/basics/uri.py @@ -381,7 +381,7 @@ def main(): # of uri executions. creates = os.path.expanduser(creates) if os.path.exists(creates): - module.exit_json(stdout="skipped, since %s exists" % creates, skipped=True, changed=False, stderr=False, rc=0) + module.exit_json(stdout="skipped, since %s exists" % creates, changed=False, stderr=False, rc=0) if removes is not None: # do not run the command if the line contains removes=filename @@ -389,7 +389,7 @@ def main(): # of uri executions. v = os.path.expanduser(removes) if not os.path.exists(removes): - module.exit_json(stdout="skipped, since %s does not exist" % removes, skipped=True, changed=False, stderr=False, rc=0) + module.exit_json(stdout="skipped, since %s does not exist" % removes, changed=False, stderr=False, rc=0) # httplib2 only sends authentication after the server asks for it with a 401. 
From 7c42182f4711711853600644334846132868595e Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 18 Feb 2015 08:59:26 -0500 Subject: [PATCH 175/236] Revert "Don't use 'tenancy' in ec2 spot requests" --- cloud/amazon/ec2.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index 4d65fbf9841..d34931c9914 100755 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -834,8 +834,7 @@ def create_instances(module, ec2, override_count=None): if ebs_optimized: params['ebs_optimized'] = ebs_optimized - # 'tenancy' always has a default value, but it is not a valid parameter for spot instance resquest - if not spot_price: + if tenancy: params['tenancy'] = tenancy if boto_supports_profile_name_arg(ec2): From 77241123a7d1f188dc5d6d5d162dbf0da932f098 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 18 Feb 2015 11:07:13 -0500 Subject: [PATCH 176/236] Revert "Revert "Don't use 'tenancy' in ec2 spot requests"" --- cloud/amazon/ec2.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index d34931c9914..4d65fbf9841 100755 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -834,7 +834,8 @@ def create_instances(module, ec2, override_count=None): if ebs_optimized: params['ebs_optimized'] = ebs_optimized - if tenancy: + # 'tenancy' always has a default value, but it is not a valid parameter for spot instance resquest + if not spot_price: params['tenancy'] = tenancy if boto_supports_profile_name_arg(ec2): From 53ef859ae49de85c079a2e2518d0f2a6ca230787 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 18 Feb 2015 13:32:50 -0500 Subject: [PATCH 177/236] fix instance=None documentation --- cloud/amazon/ec2_vol.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/cloud/amazon/ec2_vol.py b/cloud/amazon/ec2_vol.py index 7fd58fa5348..01a539ae4b0 100644 --- a/cloud/amazon/ec2_vol.py +++ b/cloud/amazon/ec2_vol.py @@ -24,9 +24,9 @@ 
version_added: "1.1" options: instance: description: - - instance ID if you wish to attach the volume. + - instance ID if you wish to attach the volume. Since 1.9 you can set to None to detach. required: false - default: null + default: null aliases: [] name: description: @@ -152,12 +152,12 @@ EXAMPLES = ''' image: "{{ image }}" zone: YYYYYY id: my_instance - wait: yes + wait: yes count: 1 register: ec2 - ec2_vol: - instance: "{{ item.id }}" + instance: "{{ item.id }}" name: my_existing_volume_Name_tag device_name: /dev/xvdf with_items: ec2.instances @@ -168,7 +168,7 @@ EXAMPLES = ''' id: vol-XXXXXXXX state: absent -# Detach a volume +# Detach a volume (since 1.9) - ec2_vol: id: vol-XXXXXXXX instance: None @@ -177,7 +177,7 @@ EXAMPLES = ''' - ec2_vol: instance: i-XXXXXX state: list - + # Create new volume using SSD storage - ec2_vol: instance: XXXXXX From 5df3058aabe27c0e02f1f89364e5bda8b67a666c Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 18 Feb 2015 15:22:24 -0500 Subject: [PATCH 178/236] fix for mount w/o opts (bug introduced when fixing bind mounts) --- system/mount.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/mount.py b/system/mount.py index d1104b361e1..d415d65b0d5 100644 --- a/system/mount.py +++ b/system/mount.py @@ -320,7 +320,7 @@ def main(): if os.path.ismount(name): if changed: res,msg = mount(module, **args) - elif "bind" in args['opts']: + elif 'bind' in args.get('opts', []): changed = True cmd = 'mount -l' rc, out, err = module.run_command(cmd) From 3fbfd7351b7c3882c7f7214c8eec6b2c71595c09 Mon Sep 17 00:00:00 2001 From: Edward Date: Wed, 18 Feb 2015 21:23:11 -0500 Subject: [PATCH 179/236] Fixes #816 Backups not enabled on new droplets. Convert backups_enabled string to lowercase. Similar to fix for private networking. 
--- cloud/digital_ocean/digital_ocean.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cloud/digital_ocean/digital_ocean.py b/cloud/digital_ocean/digital_ocean.py index 7e0a432c8dc..f3a904e8b24 100644 --- a/cloud/digital_ocean/digital_ocean.py +++ b/cloud/digital_ocean/digital_ocean.py @@ -237,7 +237,8 @@ class Droplet(JsonfyMixIn): @classmethod def add(cls, name, size_id, image_id, region_id, ssh_key_ids=None, virtio=True, private_networking=False, backups_enabled=False): private_networking_lower = str(private_networking).lower() - json = cls.manager.new_droplet(name, size_id, image_id, region_id, ssh_key_ids, virtio, private_networking_lower, backups_enabled) + backups_enabled_lower = str(backups_enabled).lower() + json = cls.manager.new_droplet(name, size_id, image_id, region_id, ssh_key_ids, virtio, private_networking_lower, backups_enabled_lower) droplet = cls(json) return droplet From bc0c1692b52ea24a4523a0d52821d27bb12e99e9 Mon Sep 17 00:00:00 2001 From: Bret Martin Date: Thu, 4 Dec 2014 15:12:26 -0500 Subject: [PATCH 180/236] ec2: make group only match group names in this VPC --- cloud/amazon/ec2.py | 33 +++++++++++++++++++++++++++------ 1 file changed, 27 insertions(+), 6 deletions(-) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index 4d65fbf9841..47f75cf1871 100755 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -495,6 +495,7 @@ try: import boto.ec2 from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping from boto.exception import EC2ResponseError + from boto.vpc import VPCConnection except ImportError: print "failed=True msg='boto required for this module'" sys.exit(1) @@ -675,7 +676,7 @@ def boto_supports_param_in_spot_request(ec2, param): method = getattr(ec2, 'request_spot_instances') return param in method.func_code.co_varnames -def enforce_count(module, ec2): +def enforce_count(module, ec2, vpc): exact_count = module.params.get('exact_count') count_tag = module.params.get('count_tag') @@ 
-700,7 +701,7 @@ def enforce_count(module, ec2): to_create = exact_count - len(instances) if not checkmode: (instance_dict_array, changed_instance_ids, changed) \ - = create_instances(module, ec2, override_count=to_create) + = create_instances(module, ec2, vpc, override_count=to_create) for inst in instance_dict_array: instances.append(inst) @@ -731,7 +732,7 @@ def enforce_count(module, ec2): return (all_instances, instance_dict_array, changed_instance_ids, changed) -def create_instances(module, ec2, override_count=None): +def create_instances(module, ec2, vpc, override_count=None): """ Creates new instances @@ -780,10 +781,16 @@ def create_instances(module, ec2, override_count=None): module.fail_json(msg = str("Use only one type of parameter (group_name) or (group_id)")) sys.exit(1) + if vpc_subnet_id: + vpc_id = vpc.get_all_subnets(subnet_ids=[vpc_subnet_id])[0].vpc_id + try: # Here we try to lookup the group id from the security group name - if group is set. if group_name: - grp_details = ec2.get_all_security_groups() + if vpc_id: + grp_details = ec2.get_all_security_groups(filters={'vpc_id': vpc_id}) + else: + grp_details = ec2.get_all_security_groups() if type(group_name) == list: group_id = [ str(grp.id) for grp in grp_details if str(grp.name) in group_name ] elif type(group_name) == str: @@ -1197,6 +1204,20 @@ def main(): ec2 = ec2_connect(module) + ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module) + + if region: + try: + vpc = boto.vpc.connect_to_region( + region, + aws_access_key_id=aws_access_key, + aws_secret_access_key=aws_secret_key + ) + except boto.exception.NoAuthHandlerFound, e: + module.fail_json(msg = str(e)) + else: + module.fail_json(msg="region must be specified") + tagged_instances = [] state = module.params.get('state') @@ -1221,9 +1242,9 @@ def main(): module.fail_json(msg='image parameter is required for new instance') if module.params.get('exact_count') is None: - (instance_dict_array, new_instance_ids, changed) = 
create_instances(module, ec2) + (instance_dict_array, new_instance_ids, changed) = create_instances(module, ec2, vpc) else: - (tagged_instances, instance_dict_array, new_instance_ids, changed) = enforce_count(module, ec2) + (tagged_instances, instance_dict_array, new_instance_ids, changed) = enforce_count(module, ec2, vpc) module.exit_json(changed=changed, instance_ids=new_instance_ids, instances=instance_dict_array, tagged_instances=tagged_instances) From 57f0d2248249d5cca7f4876bd7c1e01ecb419482 Mon Sep 17 00:00:00 2001 From: Jorge-Rodriguez Date: Fri, 16 Jan 2015 12:26:13 +0200 Subject: [PATCH 181/236] Added optional parameter scheduler_hints. --- cloud/openstack/nova_compute.py | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/cloud/openstack/nova_compute.py b/cloud/openstack/nova_compute.py index a9bd05b1e3f..e4e1cae6c80 100644 --- a/cloud/openstack/nova_compute.py +++ b/cloud/openstack/nova_compute.py @@ -22,7 +22,7 @@ import os try: from novaclient.v1_1 import client as nova_client - from novaclient.v1_1 import floating_ips + from novaclient.v1_1 import floating_ips from novaclient import exceptions from novaclient import utils import time @@ -168,6 +168,12 @@ options: required: false default: None version_added: "1.6" + scheduler_hints: + description: + - Arbitrary key/value pairs to the scheduler for custom use + required: false + default: None + version_added: "1.9" requirements: ["novaclient"] ''' @@ -294,15 +300,15 @@ def _add_floating_ip_from_pool(module, nova, server): # instantiate FloatingIPManager object floating_ip_obj = floating_ips.FloatingIPManager(nova) - # empty dict and list - usable_floating_ips = {} + # empty dict and list + usable_floating_ips = {} pools = [] # user specified pools = module.params['floating_ip_pools'] - # get the list of all floating IPs. Mileage may - # vary according to Nova Compute configuration + # get the list of all floating IPs. 
Mileage may + # vary according to Nova Compute configuration # per cloud provider all_floating_ips = floating_ip_obj.list() @@ -378,9 +384,9 @@ def _add_floating_ip(module, nova, server): else: return server - # this may look redundant, but if there is now a + # this may look redundant, but if there is now a # floating IP, then it needs to be obtained from - # a recent server object if the above code path exec'd + # a recent server object if the above code path exec'd try: server = nova.servers.get(server.id) except Exception, e: @@ -422,7 +428,7 @@ def _create_server(module, nova): 'config_drive': module.params['config_drive'], } - for optional_param in ('region_name', 'key_name', 'availability_zone'): + for optional_param in ('region_name', 'key_name', 'availability_zone', 'scheduler_hints'): if module.params[optional_param]: bootkwargs[optional_param] = module.params[optional_param] try: @@ -443,7 +449,7 @@ def _create_server(module, nova): private = openstack_find_nova_addresses(getattr(server, 'addresses'), 'fixed', 'private') public = openstack_find_nova_addresses(getattr(server, 'addresses'), 'floating', 'public') - # now exit with info + # now exit with info module.exit_json(changed = True, id = server.id, private_ip=''.join(private), public_ip=''.join(public), status = server.status, info = server._info) if server.status == 'ERROR': @@ -543,6 +549,7 @@ def main(): auto_floating_ip = dict(default=False, type='bool'), floating_ips = dict(default=None), floating_ip_pools = dict(default=None), + scheduler_hints = dict(default=None), )) module = AnsibleModule( argument_spec=argument_spec, @@ -582,4 +589,3 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.openstack import * main() - From af31cb8fafc6594a225d2a94e002c29be6c26415 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 20 Feb 2015 09:28:11 -0500 Subject: [PATCH 182/236] now captures connection exceptions and returns in 'nicer' module failure --- cloud/amazon/ec2_ami.py 
| 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_ami.py b/cloud/amazon/ec2_ami.py index ab1f986356b..401b667c545 100644 --- a/cloud/amazon/ec2_ami.py +++ b/cloud/amazon/ec2_ami.py @@ -242,7 +242,10 @@ def main(): ) module = AnsibleModule(argument_spec=argument_spec) - ec2 = ec2_connect(module) + try: + ec2 = ec2_connect(module) + except Exception, e: + module.fail_json(msg="Error while connecting to aws: %s" % str(e)) if module.params.get('state') == 'absent': if not module.params.get('image_id'): From cbf42c95b2427eb0bdfec0cc3d0b38b0855274ea Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 12 Feb 2015 22:11:32 -0500 Subject: [PATCH 183/236] draft for documenting module returns --- files/acl.py | 8 +++++++ files/copy.py | 62 +++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 70 insertions(+) diff --git a/files/acl.py b/files/acl.py index 9790f8c927f..3fa403cbfd6 100644 --- a/files/acl.py +++ b/files/acl.py @@ -102,6 +102,14 @@ EXAMPLES = ''' register: acl_info ''' +RETURN = ''' +acl: + description: Current acl on provided path (after changes, if any) + returned: always + type: list + sample: [ "user::rwx", "group::rwx", "other::rwx" ] +''' + def normalize_permissions(p): perms = ['-','-','-'] for char in p: diff --git a/files/copy.py b/files/copy.py index 364996f5293..bfd30d315b1 100644 --- a/files/copy.py +++ b/files/copy.py @@ -108,6 +108,68 @@ EXAMPLES = ''' - copy: src=/mine/sudoers dest=/etc/sudoers validate='visudo -cf %s' ''' +RETURN = ''' +dest: + description: destination file/path + returned: always + type: string + sample: "/path/to/file.txt" +src: + description: source file used for the copy on the target machine + returned: changed + type: string + sample: "/home/httpd/.ansible/tmp/ansible-tmp-1423796390.97-147729857856000/source" +md5sum: + description: md5 checksum of the file after running copy + returned: when supported + type: string + sample: "2a5aeecc61dc98c4d780b14b330e3282", +checksum:
+ description: checksum of the file after running copy + returned: always + type: string + sample: "6e642bb8dd5c2e027bf21dd923337cbb4214f827" +backup_file: + description: name of backup file created + returned: changed and if backup=yes + type: string + sample: "/path/to/file.txt.2015-02-12@22:09~" +gid: + description: group id of the file, after execution + returned: always + type: int + sample: 100 +group: + description: group of the file, after execution + returned: always + type: string + sample: "httpd" +owner: + description: owner of the file, after execution + returned: always + type: string + sample: "httpd" +uid: 100 + description: owner id of the file, after execution + returned: always + type: int + sample: 100 +mode: + description: permissions of the target, after execution + returned: always + type: string + sample: "0644" +size: + description: size of the target, after execution + returned: always + type: int + sample: 1220 +state: + description: permissions of the target, after execution + returned: always + type: string + sample: "file" +''' def split_pre_existing_dir(dirname): ''' From 13cdadcc4d213252e6f4374305c15c9838d73df5 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 19 Feb 2015 15:36:36 -0500 Subject: [PATCH 184/236] removed always for success, as data wont show on fail --- files/acl.py | 2 +- files/copy.py | 18 +++--- files/stat.py | 173 ++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 183 insertions(+), 10 deletions(-) diff --git a/files/acl.py b/files/acl.py index 3fa403cbfd6..0c568ba59a5 100644 --- a/files/acl.py +++ b/files/acl.py @@ -105,7 +105,7 @@ EXAMPLES = ''' RETURN = ''' acl: description: Current acl on provided path (after changes, if any) - returned: always + returned: success type: list sample: [ "user::rwx", "group::rwx", "other::rwx" ] ''' diff --git a/files/copy.py b/files/copy.py index bfd30d315b1..7e1ea1db9c0 100644 --- a/files/copy.py +++ b/files/copy.py @@ -111,7 +111,7 @@ EXAMPLES = ''' RETURN = 
''' dest: description: destination file/path - returned: always + returned: success type: string sample: "/path/to/file.txt" src: @@ -126,7 +126,7 @@ md5sum: sample: "2a5aeecc61dc98c4d780b14b330e3282", checksum: description: checksum of the file after running copy - returned: always + returned: success type: string sample: "6e642bb8dd5c2e027bf21dd923337cbb4214f827" backup_file: @@ -136,37 +136,37 @@ backup_file: sample: "/path/to/file.txt.2015-02-12@22:09~" gid: description: group id of the file, after execution - returned: always + returned: success type: int sample: 100 group: description: group of the file, after execution - returned: always + returned: success type: string sample: "httpd" owner: description: owner of the file, after execution - returned: always + returned: success type: string sample: "httpd" uid: 100 description: owner id of the file, after execution - returned: always + returned: success type: int sample: 100 mode: description: permissions of the target, after execution - returned: always + returned: success type: string sample: "0644" size: description: size of the target, after execution - returned: always + returned: success type: int sample: 1220 state: description: permissions of the target, after execution - returned: always + returned: success type: string sample: "file" ''' diff --git a/files/stat.py b/files/stat.py index 484da2136d9..a98e752ccf6 100644 --- a/files/stat.py +++ b/files/stat.py @@ -69,6 +69,179 @@ EXAMPLES = ''' - stat: path=/path/to/myhugefile get_md5=no ''' +RETURN = ''' +stat: + description: dictionary containing all the stat data + returned: success + type: dictionary + contains: + exists: + description: if the destination path actually exists or not + returned: success + type: boolean + sample: True + path: + description: The full path of the file/object to get the facts of + returned: success + type: boolean + sample: '/path/to/file' + mode: + description: Unix permissions of the file in octal + returned: success, 
path exists and user can read stats + type: octal + sample: 1755 + isdir: + description: Tells you if the path is a directory + returned: success, path exists and user can read stats + type: boolean + sample: False + ischr: + description: Tells you if the path is a character device + returned: success, path exists and user can read stats + type: boolean + sample: False + isblk: + description: Tells you if the path is a block device + returned: success, path exists and user can read stats + type: boolean + sample: False + isreg: + description: Tells you if the path is a regular file + returned: success, path exists and user can read stats + type: boolean + sample: True + isfifo: + description: Tells you if the path is a named pipe + returned: success, path exists and user can read stats + type: boolean + sample: False + islnk: + description: Tells you if the path is a symbolic link + returned: success, path exists and user can read stats + type: boolean + sample: False + issock: + description: Tells you if the path is a unix domain socket + returned: success, path exists and user can read stats + type: boolean + sample: False + uid: + description: Numeric id representing the file owner + returned: success, path exists and user can read stats + type: int + sample: 1003 + gid: + description: Numeric id representing the group of the owner + returned: success, path exists and user can read stats + type: int + sample: 1003 + size: + description: Size in bytes for a plain file, ammount of data for some special files + returned: success, path exists and user can read stats + type: int + sample: 203 + inode: + description: Inode number of the path + returned: success, path exists and user can read stats + type: int + sample: 12758 + dev: + description: Device the inode resides on + returned: success, path exists and user can read stats + type: int + sample: 33 + nlink: + description: Number of links to the inode (hard links) + returned: success, path exists and user can 
read stats + type: int + sample: 1 + atime: + description: Time of last access + returned: success, path exists and user can read stats + type: float + sample: 1424348972.575 + mtime: st.st_mtime, + description: Time of last modification + returned: success, path exists and user can read stats + type: float + sample: 1424348972.575 + ctime: + description: Time of last metadata update or creation (depends on OS) + returned: success, path exists and user can read stats + type: float + sample: 1424348972.575 + wusr: + description: Tells you if the owner has write permission + returned: success, path exists and user can read stats + type: boolean + sample: True + rusr: + description: Tells you if the owner has read permission + returned: success, path exists and user can read stats + type: boolean + sample: True + xusr: + description: Tells you if the owner has execute permission + returned: success, path exists and user can read stats + type: boolean + sample: True + wgrp: + description: Tells you if the owner's group has write permission + returned: success, path exists and user can read stats + type: boolean + sample: False + rgrp: + description: Tells you if the owner's group has read permission + returned: success, path exists and user can read stats + type: boolean + sample: True + xgrp: + description: Tells you if the owner's group has execute permission + returned: success, path exists and user can read stats + type: boolean + sample: True + woth: + description: Tells you if others have write permission + returned: success, path exists and user can read stats + type: boolean + sample: False + roth: + description: Tells you if others have read permission + returned: success, path exists and user can read stats + type: boolean + sample: True + xoth: + description: Tells you if others have execute permission + returned: success, path exists and user can read stats + type: boolean + sample: True + lnk_source: + description: Original path + returned: success, path 
exists and user can read stats and the path is a symbolic link + type: boolean + sample: True + md5: + description: md5 hash of the path + returned: success, path exists and user can read stats and path supports hashing and md5 is supported + type: boolean + sample: True + checksum: + description: hash of the path + returned: success, path exists and user can read stats and path supports hashing + type: boolean + sample: True + pw_name: + description: User name of owner + returned: success, path exists and user can read stats and installed python supports it + type: string + sample: httpd + gr_name: + description: + returned: success, path exists and user can read stats and installed python supports it + type: string + sample: www-data +''' + import os import sys from stat import * From 6e654f2442d8ad7fc707677d534cc012ba7c8f15 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 19 Feb 2015 16:43:20 -0500 Subject: [PATCH 185/236] fixed issues with stats return docs as per feedback --- files/stat.py | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/files/stat.py b/files/stat.py index a98e752ccf6..49d5f166d34 100644 --- a/files/stat.py +++ b/files/stat.py @@ -82,7 +82,7 @@ stat: sample: True path: description: The full path of the file/object to get the facts of - returned: success + returned: success and if path exists type: boolean sample: '/path/to/file' mode: @@ -160,7 +160,7 @@ stat: returned: success, path exists and user can read stats type: float sample: 1424348972.575 - mtime: st.st_mtime, + mtime: description: Time of last modification returned: success, path exists and user can read stats type: float @@ -215,6 +215,16 @@ stat: returned: success, path exists and user can read stats type: boolean sample: True + isuid: + description: Tells you if the invoking user's id matches the owner's id + returned: success, path exists and user can read stats + type: boolean + sample: False + isrid: + description: Tells you if the 
invoking user's group id matches the owner's group id + returned: success, path exists and user can read stats + type: boolean + sample: False lnk_source: description: Original path returned: success, path exists and user can read stats and the path is a symbolic link @@ -236,7 +246,7 @@ stat: type: string sample: httpd gr_name: - description: + description: Group name of owner returned: success, path exists and user can read stats and installed python supports it type: string sample: www-data From dbd8ea1e7de75eeaf8c9aaaa0f29702fd3450c99 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 20 Feb 2015 09:57:27 -0500 Subject: [PATCH 186/236] fixed typo on isgid --- files/stat.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/stat.py b/files/stat.py index 49d5f166d34..fbf2d4cb8f6 100644 --- a/files/stat.py +++ b/files/stat.py @@ -220,7 +220,7 @@ stat: returned: success, path exists and user can read stats type: boolean sample: False - isrid: + isgid: description: Tells you if the invoking user's group id matches the owner's group id returned: success, path exists and user can read stats type: boolean From dc470cc91f9cb77cc107d9b1eaf73f12b246b3df Mon Sep 17 00:00:00 2001 From: "Stefan J. 
Betz" Date: Thu, 18 Dec 2014 22:47:09 +0100 Subject: [PATCH 187/236] Create SSH Keys always after creating $HOME --- system/user.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/system/user.py b/system/user.py index 0939faf313a..e4a52806718 100755 --- a/system/user.py +++ b/system/user.py @@ -1917,6 +1917,16 @@ def main(): if user.groups is not None: result['groups'] = user.groups + # handle missing homedirs + info = user.user_info() + if user.home is None: + user.home = info[5] + if not os.path.exists(user.home) and user.createhome: + if not module.check_mode: + user.create_homedir(user.home) + user.chown_homedir(info[2], info[3], user.home) + result['changed'] = True + # deal with ssh key if user.sshkeygen: (rc, out, err) = user.ssh_key_gen() @@ -1932,16 +1942,6 @@ def main(): result['ssh_key_file'] = user.get_ssh_key_path() result['ssh_public_key'] = user.get_ssh_public_key() - # handle missing homedirs - info = user.user_info() - if user.home is None: - user.home = info[5] - if not os.path.exists(user.home) and user.createhome: - if not module.check_mode: - user.create_homedir(user.home) - user.chown_homedir(info[2], info[3], user.home) - result['changed'] = True - module.exit_json(**result) # import module snippets From 2c520f31b3f6b1cc5032fdeefccc4c2b7b6a2850 Mon Sep 17 00:00:00 2001 From: blxd Date: Mon, 23 Feb 2015 11:14:38 +0000 Subject: [PATCH 188/236] Added check mode support to the ini_file module --- files/ini_file.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/files/ini_file.py b/files/ini_file.py index 756f2732a84..e247c265fc8 100644 --- a/files/ini_file.py +++ b/files/ini_file.py @@ -97,9 +97,9 @@ def do_ini(module, filename, section=None, option=None, value=None, state='prese changed = False if (sys.version_info[0] == 2 and sys.version_info[1] >= 7) or sys.version_info[0] >= 3: - cp = ConfigParser.ConfigParser(allow_no_value=True) + cp = 
ConfigParser.ConfigParser(allow_no_value=True) else: - cp = ConfigParser.ConfigParser() + cp = ConfigParser.ConfigParser() cp.optionxform = identity try: @@ -126,7 +126,7 @@ def do_ini(module, filename, section=None, option=None, value=None, state='prese if state == 'present': # DEFAULT section is always there by DEFAULT, so never try to add it. - if cp.has_section(section) == False and section.upper() != 'DEFAULT': + if not cp.has_section(section) and section.upper() != 'DEFAULT': cp.add_section(section) changed = True @@ -144,7 +144,7 @@ def do_ini(module, filename, section=None, option=None, value=None, state='prese cp.set(section, option, value) changed = True - if changed: + if changed and not module.check_mode: if backup: module.backup_local(filename) @@ -152,7 +152,7 @@ def do_ini(module, filename, section=None, option=None, value=None, state='prese f = open(filename, 'w') cp.write(f) except: - module.fail_json(msg="Can't creat %s" % filename) + module.fail_json(msg="Can't create %s" % filename) return changed @@ -183,7 +183,8 @@ def main(): backup = dict(default='no', type='bool'), state = dict(default='present', choices=['present', 'absent']) ), - add_file_common_args = True + add_file_common_args = True, + supports_check_mode = True ) info = dict() From d2158708ff613afea859faa6c7363ecbaa04539d Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 23 Feb 2015 12:43:10 -0500 Subject: [PATCH 189/236] undeprecated docker_image until replacement actually arives --- cloud/docker/{_docker_image.py => docker_image.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename cloud/docker/{_docker_image.py => docker_image.py} (100%) diff --git a/cloud/docker/_docker_image.py b/cloud/docker/docker_image.py similarity index 100% rename from cloud/docker/_docker_image.py rename to cloud/docker/docker_image.py From 9967aa668184163eec189a7d1978210012adffbe Mon Sep 17 00:00:00 2001 From: Gerard Lynch Date: Sat, 21 Feb 2015 17:44:37 +0000 Subject: [PATCH 190/236] 
wait_for: updated docs, must be resolvable hostname, updated example --- utilities/logic/wait_for.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/utilities/logic/wait_for.py b/utilities/logic/wait_for.py index ae316fe1a17..d13c275fc41 100644 --- a/utilities/logic/wait_for.py +++ b/utilities/logic/wait_for.py @@ -54,7 +54,7 @@ version_added: "0.7" options: host: description: - - hostname or IP address to wait for + - A resolvable hostname or IP address to wait for required: false default: "127.0.0.1" aliases: [] @@ -123,8 +123,9 @@ EXAMPLES = ''' # wait until the process is finished and pid was destroyed - wait_for: path=/proc/3466/status state=absent -# Wait 300 seconds for port 22 to become open and contain "OpenSSH", don't start checking for 10 seconds -- local_action: wait_for port=22 host="{{ inventory_hostname }}" search_regex=OpenSSH delay=10 +# wait 300 seconds for port 22 to become open and contain "OpenSSH", don't assume the inventory_hostname is resolvable +# and don't start checking for 10 seconds +- local_action: wait_for port=22 host="{{ ansible_ssh_host | default(inventory_hostname) }}" search_regex=OpenSSH delay=10 ''' From 7a50642c20ae98da657a3416b34760c082d42c74 Mon Sep 17 00:00:00 2001 From: Dann Bohn Date: Wed, 25 Feb 2015 08:25:34 -0500 Subject: [PATCH 191/236] rhn_channel using depriciated methods. 
swapped out with their replacements --- packaging/os/rhn_channel.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/packaging/os/rhn_channel.py b/packaging/os/rhn_channel.py index 05a155f7ca1..42d61f36e66 100644 --- a/packaging/os/rhn_channel.py +++ b/packaging/os/rhn_channel.py @@ -90,17 +90,17 @@ def get_systemid(client, session, sysname): # ------------------------------------------------------- # -def subscribe_channels(channels, client, session, sysname, sys_id): - c = base_channels(client, session, sys_id) - c.append(channels) - return client.channel.software.setSystemChannels(session, sys_id, c) +def subscribe_channels(channelname, client, session, sysname, sys_id): + channels = base_channels(client, session, sys_id) + channels.append(channelname) + return client.system.setChildChannels(session, sys_id, channels) # ------------------------------------------------------- # -def unsubscribe_channels(channels, client, session, sysname, sys_id): - c = base_channels(client, session, sys_id) - c.remove(channels) - return client.channel.software.setSystemChannels(session, sys_id, c) +def unsubscribe_channels(channelname, client, session, sysname, sys_id): + channels = base_channels(client, session, sys_id) + channels.remove(channelname) + return client.system.setChildChannels(session, sys_id, channels) # ------------------------------------------------------- # @@ -167,3 +167,4 @@ def main(): # import module snippets from ansible.module_utils.basic import * main() + From 60e2e43f003ab14f33936ba4aee19699d60db546 Mon Sep 17 00:00:00 2001 From: Michael Laccetti Date: Wed, 25 Feb 2015 08:46:13 -0500 Subject: [PATCH 192/236] Change s3 to use connect_to_region * Allows us to use Ansible/s3 for GovCloud when accessing a pre-existing bucket --- cloud/amazon/s3.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cloud/amazon/s3.py b/cloud/amazon/s3.py index 7b914dd9117..f3f6e222c66 100644 --- a/cloud/amazon/s3.py +++ 
b/cloud/amazon/s3.py @@ -339,7 +339,8 @@ def main(): module.fail_json(msg = str(e)) else: try: - s3 = boto.connect_s3(aws_access_key, aws_secret_key) + from boto.s3.connection import OrdinaryCallingFormat + s3 = boto.s3.connect_to_region(location, aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_key, is_secure=True, calling_format=OrdinaryCallingFormat()) except boto.exception.NoAuthHandlerFound, e: module.fail_json(msg = str(e)) From addca40604a98e226184529a756b5d318102367d Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 25 Feb 2015 06:49:05 -0800 Subject: [PATCH 193/236] Fix documentation to have correct param name --- database/mysql/mysql_user.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/database/mysql/mysql_user.py b/database/mysql/mysql_user.py index 3590fb8e640..2ac75a67680 100644 --- a/database/mysql/mysql_user.py +++ b/database/mysql/mysql_user.py @@ -118,7 +118,7 @@ EXAMPLES = """ - mysql_user: name=bob password=12345 priv=*.*:ALL,GRANT state=present # Modifiy user Bob to require SSL connections. Note that REQUIRESSL is a special privilege that should only apply to *.* by itself. -- mysql_user: name=bob append=true priv=*.*:REQUIRESSL state=present +- mysql_user: name=bob append_privs=true priv=*.*:REQUIRESSL state=present # Ensure no user named 'sally' exists, also passing in the auth credentials. - mysql_user: login_user=root login_password=123456 name=sally state=absent From 4db27877bf3af99f982f471f8580d4f4b61e10c7 Mon Sep 17 00:00:00 2001 From: Edward Larkey Date: Wed, 25 Feb 2015 09:00:31 -0600 Subject: [PATCH 194/236] Fix typo in authorized key module documentation. Added opening double quote --- system/authorized_key.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/authorized_key.py b/system/authorized_key.py index 4d2af6a3bcc..38a8b96d83f 100644 --- a/system/authorized_key.py +++ b/system/authorized_key.py @@ -76,7 +76,7 @@ options: authorized_keys file. 
Multiple keys can be specified in a single key= string value by separating them by newlines. required: false - choices: [ yes", "no" ] + choices: [ "yes", "no" ] default: "no" version_added: "1.9" description: From 62fa77fbf94d22a65fc2a3663f04e59ba937e565 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 26 Feb 2015 08:43:24 -0500 Subject: [PATCH 195/236] fixed file module to use realpath when following links, readlink could be relative and fail in corner cases as per #852 --- files/file.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/files/file.py b/files/file.py index fd06d2d48cd..4eb6cb0b64f 100644 --- a/files/file.py +++ b/files/file.py @@ -201,7 +201,7 @@ def main(): if state in ['link','hard']: if follow and state == 'link': # use the current target of the link as the source - src = os.readlink(path) + src = os.path.realpath(path) else: module.fail_json(msg='src and dest are required for creating links') @@ -245,7 +245,7 @@ def main(): if state != prev_state: if follow and prev_state == 'link': # follow symlink and operate on original - path = os.readlink(path) + path = os.path.realpath(path) prev_state = get_state(path) file_args['path'] = path @@ -258,7 +258,7 @@ def main(): elif state == 'directory': if follow and prev_state == 'link': - path = os.readlink(path) + path = os.path.realpath(path) prev_state = get_state(path) if prev_state == 'absent': From dd4a6e86bd6a754e65702ea88542b824711ad4b2 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Thu, 26 Feb 2015 10:00:49 -0500 Subject: [PATCH 196/236] Deprecate old OpenStack modules There are a completely new set of modules that do all of the things like keystone v3 and auth_plugins and the like correctly. Structurally upgrading these would have been massively disruptive and there is no real good way to do so without breaking people. These modules should be kept around for several releases - they still work for people - and they should get bug fixes. 
But they should not take new features. New features should go to the os_ modules. --- cloud/openstack/{glance_image.py => _glance_image.py} | 0 cloud/openstack/{keystone_user.py => _keystone_user.py} | 0 cloud/openstack/{nova_compute.py => _nova_compute.py} | 0 cloud/openstack/{nova_keypair.py => _nova_keypair.py} | 0 .../openstack/{quantum_floating_ip.py => _quantum_floating_ip.py} | 0 ...floating_ip_associate.py => _quantum_floating_ip_associate.py} | 0 cloud/openstack/{quantum_network.py => _quantum_network.py} | 0 cloud/openstack/{quantum_router.py => _quantum_router.py} | 0 .../{quantum_router_gateway.py => _quantum_router_gateway.py} | 0 .../{quantum_router_interface.py => _quantum_router_interface.py} | 0 cloud/openstack/{quantum_subnet.py => _quantum_subnet.py} | 0 11 files changed, 0 insertions(+), 0 deletions(-) rename cloud/openstack/{glance_image.py => _glance_image.py} (100%) rename cloud/openstack/{keystone_user.py => _keystone_user.py} (100%) rename cloud/openstack/{nova_compute.py => _nova_compute.py} (100%) rename cloud/openstack/{nova_keypair.py => _nova_keypair.py} (100%) rename cloud/openstack/{quantum_floating_ip.py => _quantum_floating_ip.py} (100%) rename cloud/openstack/{quantum_floating_ip_associate.py => _quantum_floating_ip_associate.py} (100%) rename cloud/openstack/{quantum_network.py => _quantum_network.py} (100%) rename cloud/openstack/{quantum_router.py => _quantum_router.py} (100%) rename cloud/openstack/{quantum_router_gateway.py => _quantum_router_gateway.py} (100%) rename cloud/openstack/{quantum_router_interface.py => _quantum_router_interface.py} (100%) rename cloud/openstack/{quantum_subnet.py => _quantum_subnet.py} (100%) diff --git a/cloud/openstack/glance_image.py b/cloud/openstack/_glance_image.py similarity index 100% rename from cloud/openstack/glance_image.py rename to cloud/openstack/_glance_image.py diff --git a/cloud/openstack/keystone_user.py b/cloud/openstack/_keystone_user.py similarity index 100% rename from 
cloud/openstack/keystone_user.py rename to cloud/openstack/_keystone_user.py diff --git a/cloud/openstack/nova_compute.py b/cloud/openstack/_nova_compute.py similarity index 100% rename from cloud/openstack/nova_compute.py rename to cloud/openstack/_nova_compute.py diff --git a/cloud/openstack/nova_keypair.py b/cloud/openstack/_nova_keypair.py similarity index 100% rename from cloud/openstack/nova_keypair.py rename to cloud/openstack/_nova_keypair.py diff --git a/cloud/openstack/quantum_floating_ip.py b/cloud/openstack/_quantum_floating_ip.py similarity index 100% rename from cloud/openstack/quantum_floating_ip.py rename to cloud/openstack/_quantum_floating_ip.py diff --git a/cloud/openstack/quantum_floating_ip_associate.py b/cloud/openstack/_quantum_floating_ip_associate.py similarity index 100% rename from cloud/openstack/quantum_floating_ip_associate.py rename to cloud/openstack/_quantum_floating_ip_associate.py diff --git a/cloud/openstack/quantum_network.py b/cloud/openstack/_quantum_network.py similarity index 100% rename from cloud/openstack/quantum_network.py rename to cloud/openstack/_quantum_network.py diff --git a/cloud/openstack/quantum_router.py b/cloud/openstack/_quantum_router.py similarity index 100% rename from cloud/openstack/quantum_router.py rename to cloud/openstack/_quantum_router.py diff --git a/cloud/openstack/quantum_router_gateway.py b/cloud/openstack/_quantum_router_gateway.py similarity index 100% rename from cloud/openstack/quantum_router_gateway.py rename to cloud/openstack/_quantum_router_gateway.py diff --git a/cloud/openstack/quantum_router_interface.py b/cloud/openstack/_quantum_router_interface.py similarity index 100% rename from cloud/openstack/quantum_router_interface.py rename to cloud/openstack/_quantum_router_interface.py diff --git a/cloud/openstack/quantum_subnet.py b/cloud/openstack/_quantum_subnet.py similarity index 100% rename from cloud/openstack/quantum_subnet.py rename to cloud/openstack/_quantum_subnet.py From 
544df8a65032803dd008fe94df16efcf4c90c1d1 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 26 Feb 2015 10:10:42 -0500 Subject: [PATCH 197/236] fixed add_host docs indentation --- inventory/add_host.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/inventory/add_host.py b/inventory/add_host.py index 0f1b84abcd2..c7e066b74ee 100644 --- a/inventory/add_host.py +++ b/inventory/add_host.py @@ -21,7 +21,7 @@ options: required: false notes: - This module bypasses the play host loop and only runs once for all the hosts in the play, if you need it - to iterate use a with_ directive. + to iterate use a with_ directive. author: Seth Vidal ''' From cae8241ea71abfa5e57f5557bb98448badfbca61 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Thu, 26 Feb 2015 21:12:27 -0500 Subject: [PATCH 198/236] Add deprecation notices for old OpenStack modules --- cloud/openstack/_glance_image.py | 1 + cloud/openstack/_keystone_user.py | 1 + cloud/openstack/_nova_compute.py | 1 + cloud/openstack/_nova_keypair.py | 1 + cloud/openstack/_quantum_floating_ip.py | 1 + cloud/openstack/_quantum_floating_ip_associate.py | 1 + cloud/openstack/_quantum_network.py | 1 + cloud/openstack/_quantum_router_gateway.py | 1 + cloud/openstack/_quantum_router_interface.py | 1 + cloud/openstack/_quantum_subnet.py | 1 + 10 files changed, 10 insertions(+) diff --git a/cloud/openstack/_glance_image.py b/cloud/openstack/_glance_image.py index 6425fa2ca5d..947b984a761 100644 --- a/cloud/openstack/_glance_image.py +++ b/cloud/openstack/_glance_image.py @@ -20,6 +20,7 @@ DOCUMENTATION = ''' --- module: glance_image version_added: "1.2" +deprecated: Deprecated in 1.9. Use os_image instead short_description: Add/Delete images from glance description: - Add or Remove images from the glance repository. 
diff --git a/cloud/openstack/_keystone_user.py b/cloud/openstack/_keystone_user.py index 4af254bfe6d..9bc5cc9520f 100644 --- a/cloud/openstack/_keystone_user.py +++ b/cloud/openstack/_keystone_user.py @@ -7,6 +7,7 @@ DOCUMENTATION = ''' --- module: keystone_user version_added: "1.2" +deprecated: Deprecated in 1.9. Use os_keystone_user instead short_description: Manage OpenStack Identity (keystone) users, tenants and roles description: - Manage users,tenants, roles from OpenStack. diff --git a/cloud/openstack/_nova_compute.py b/cloud/openstack/_nova_compute.py index e4e1cae6c80..0b911e7659f 100644 --- a/cloud/openstack/_nova_compute.py +++ b/cloud/openstack/_nova_compute.py @@ -33,6 +33,7 @@ DOCUMENTATION = ''' --- module: nova_compute version_added: "1.2" +deprecated: Deprecated in 1.9. Use os_server instead short_description: Create/Delete VMs from OpenStack description: - Create or Remove virtual machines from Openstack. diff --git a/cloud/openstack/_nova_keypair.py b/cloud/openstack/_nova_keypair.py index c7c9affb3e6..14d3f24259e 100644 --- a/cloud/openstack/_nova_keypair.py +++ b/cloud/openstack/_nova_keypair.py @@ -28,6 +28,7 @@ DOCUMENTATION = ''' --- module: nova_keypair version_added: "1.2" +deprecated: Deprecated in 1.9. Use os_keypair instead short_description: Add/Delete key pair from nova description: - Add or Remove key pair from nova . diff --git a/cloud/openstack/_quantum_floating_ip.py b/cloud/openstack/_quantum_floating_ip.py index 17f78effffd..821e8b608b4 100644 --- a/cloud/openstack/_quantum_floating_ip.py +++ b/cloud/openstack/_quantum_floating_ip.py @@ -31,6 +31,7 @@ DOCUMENTATION = ''' --- module: quantum_floating_ip version_added: "1.2" +deprecated: Deprecated in 1.9. 
Use os_floating_ip instead short_description: Add/Remove floating IP from an instance description: - Add or Remove a floating IP to an instance diff --git a/cloud/openstack/_quantum_floating_ip_associate.py b/cloud/openstack/_quantum_floating_ip_associate.py index 91df2690b62..88720529d65 100644 --- a/cloud/openstack/_quantum_floating_ip_associate.py +++ b/cloud/openstack/_quantum_floating_ip_associate.py @@ -31,6 +31,7 @@ DOCUMENTATION = ''' --- module: quantum_floating_ip_associate version_added: "1.2" +deprecated: Deprecated in 1.9. Use os_floating_ip instead short_description: Associate or disassociate a particular floating IP with an instance description: - Associates or disassociates a specific floating IP with a particular instance diff --git a/cloud/openstack/_quantum_network.py b/cloud/openstack/_quantum_network.py index 6b0c66e7a12..7c66af07295 100644 --- a/cloud/openstack/_quantum_network.py +++ b/cloud/openstack/_quantum_network.py @@ -29,6 +29,7 @@ DOCUMENTATION = ''' --- module: quantum_network version_added: "1.4" +deprecated: Deprecated in 1.9. Use os_network instead short_description: Creates/Removes networks from OpenStack description: - Add or Remove network from OpenStack. diff --git a/cloud/openstack/_quantum_router_gateway.py b/cloud/openstack/_quantum_router_gateway.py index 5de19fd4785..cbf6a841c5c 100644 --- a/cloud/openstack/_quantum_router_gateway.py +++ b/cloud/openstack/_quantum_router_gateway.py @@ -28,6 +28,7 @@ DOCUMENTATION = ''' --- module: quantum_router_gateway version_added: "1.2" +deprecated: Deprecated in 1.9. Use os_router_gateway instead short_description: set/unset a gateway interface for the router with the specified external network description: - Creates/Removes a gateway interface from the router, used to associate a external network with a router to route external traffic. 
diff --git a/cloud/openstack/_quantum_router_interface.py b/cloud/openstack/_quantum_router_interface.py index c5828ad4106..3e758d7d920 100644 --- a/cloud/openstack/_quantum_router_interface.py +++ b/cloud/openstack/_quantum_router_interface.py @@ -27,6 +27,7 @@ except ImportError: DOCUMENTATION = ''' --- module: quantum_router_interface +deprecated: Deprecated in 1.9. Use os_router_interface instead version_added: "1.2" short_description: Attach/Dettach a subnet's interface to a router description: diff --git a/cloud/openstack/_quantum_subnet.py b/cloud/openstack/_quantum_subnet.py index e38b2c94aa6..2d485c15962 100644 --- a/cloud/openstack/_quantum_subnet.py +++ b/cloud/openstack/_quantum_subnet.py @@ -29,6 +29,7 @@ DOCUMENTATION = ''' --- module: quantum_subnet version_added: "1.2" +deprecated: Deprecated in 1.9. Use os_subnet instead short_description: Add/remove subnet from a network description: - Add/remove subnet from a network From fafb93490cd5841428aef39cbcf6e0f740e1a983 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 26 Feb 2015 21:35:10 -0600 Subject: [PATCH 199/236] Add private_zone parameter for route53 Fixes #473 --- cloud/amazon/route53.py | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/cloud/amazon/route53.py b/cloud/amazon/route53.py index 7fbe8552f41..38f8dc67c72 100644 --- a/cloud/amazon/route53.py +++ b/cloud/amazon/route53.py @@ -84,6 +84,12 @@ options: required: false default: 500 aliases: [] + private_zone: + description: + - If set to true, the private zone matching the requested name within the domain will be used if there are both public and private zones. The default is to use the public zone. 
+ required: false + default: false + version_added: "1.9" requirements: [ "boto" ] author: Bruce Pennypacker ''' @@ -175,7 +181,8 @@ def main(): type = dict(choices=['A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'NS'], required=True), value = dict(required=False), overwrite = dict(required=False, type='bool'), - retry_interval = dict(required=False, default=500) + retry_interval = dict(required=False, default=500), + private_zone = dict(required=False, type='bool', default=False), ) ) module = AnsibleModule(argument_spec=argument_spec) @@ -187,6 +194,7 @@ def main(): type_in = module.params.get('type') value_in = module.params.get('value') retry_interval_in = module.params.get('retry_interval') + private_zone_in = module.params.get('private_zone') ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module) @@ -218,8 +226,11 @@ def main(): zones = {} results = conn.get_all_hosted_zones() for r53zone in results['ListHostedZonesResponse']['HostedZones']: - zone_id = r53zone['Id'].replace('/hostedzone/', '') - zones[r53zone['Name']] = zone_id + # only save this zone id if the private status of the zone matches + # the private_zone_in boolean specified in the params + if module.boolean(r53zone['Config']['PrivateZone']) == private_zone_in: + zone_id = r53zone['Id'].replace('/hostedzone/', '') + zones[r53zone['Name']] = zone_id # Verify that the requested zone is already defined in Route53 if not zone_in in zones: From fa19753707ba93c4fa7c8abc953d3c647e21be25 Mon Sep 17 00:00:00 2001 From: "Oleg A. Mamontov" Date: Fri, 27 Feb 2015 14:39:18 +0300 Subject: [PATCH 200/236] On FreeBSD apply 'login_class' only if there is real change. 
--- system/user.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/system/user.py b/system/user.py index e4a52806718..4f96ab1e804 100755 --- a/system/user.py +++ b/system/user.py @@ -797,8 +797,17 @@ class FreeBsdUser(User): cmd.append(self.shell) if self.login_class is not None: - cmd.append('-L') - cmd.append(self.login_class) + # find current login class + user_login_class = None + if os.path.exists(self.SHADOWFILE) and os.access(self.SHADOWFILE, os.R_OK): + for line in open(self.SHADOWFILE).readlines(): + if line.startswith('%s:' % self.name): + user_login_class = line.split(':')[4] + + # act only if login_class change + if self.login_class != user_login_class: + cmd.append('-L') + cmd.append(self.login_class) if self.groups is not None: current_groups = self.user_group_membership() From 2e4091f31dd248d8f364b4e2a19b0a594c8e2d95 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Fri, 27 Feb 2015 07:05:03 -0500 Subject: [PATCH 201/236] One more OpenStack deprecation notice Fixes #858 --- cloud/openstack/_quantum_router.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cloud/openstack/_quantum_router.py b/cloud/openstack/_quantum_router.py index 38d479128f2..154eff84d90 100644 --- a/cloud/openstack/_quantum_router.py +++ b/cloud/openstack/_quantum_router.py @@ -30,6 +30,7 @@ DOCUMENTATION = ''' module: quantum_router version_added: "1.2" short_description: Create or Remove router from openstack +deprecated: Deprecated in 1.9. 
Use os_router instead description: - Create or Delete routers from OpenStack options: From db38d31f02acfd2b1ea8b29e52c4c4a11ac0c073 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 27 Feb 2015 11:00:21 -0600 Subject: [PATCH 202/236] Updating version added fields for new alias parameters in route53 --- cloud/amazon/route53.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/amazon/route53.py b/cloud/amazon/route53.py index 150b37a4af9..5e38dbc0584 100644 --- a/cloud/amazon/route53.py +++ b/cloud/amazon/route53.py @@ -58,14 +58,14 @@ options: description: - Indicates if this is an alias record. required: false - version_added: 1.8 + version_added: 1.9 default: False aliases: [] alias_hosted_zone_id: description: - The hosted zone identifier. required: false - version_added: 1.8 + version_added: 1.9 default: null aliases: [] value: From 0029c960e4f8aeeecf566a57e5ae2a6ef5d84106 Mon Sep 17 00:00:00 2001 From: Carlo Mandelli Date: Fri, 9 Jan 2015 18:35:31 +0100 Subject: [PATCH 203/236] Added HP-UX subclass --- system/user.py | 152 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 152 insertions(+) diff --git a/system/user.py b/system/user.py index 4f96ab1e804..046daf20769 100755 --- a/system/user.py +++ b/system/user.py @@ -1821,6 +1821,158 @@ class AIX(User): # =========================================== +class HPUX(User): + """ + This is a HP-UX User manipulation class. 
+ + This overrides the following methods from the generic class:- + - create_user() + - remove_user() + - modify_user() + """ + + platform = 'HP-UX' + distribution = None + SHADOWFILE = '/etc/shadow' + + def create_user(self): + cmd = ['/usr/sam/lbin/useradd.sam'] + + if self.uid is not None: + cmd.append('-u') + cmd.append(self.uid) + + if self.non_unique: + cmd.append('-o') + + if self.group is not None: + if not self.group_exists(self.group): + self.module.fail_json(msg="Group %s does not exist" % self.group) + cmd.append('-g') + cmd.append(self.group) + + if self.groups is not None and len(self.groups): + groups = self.get_groups_set() + cmd.append('-G') + cmd.append(','.join(groups)) + + if self.comment is not None: + cmd.append('-c') + cmd.append(self.comment) + + if self.home is not None: + cmd.append('-d') + cmd.append(self.home) + + if self.shell is not None: + cmd.append('-s') + cmd.append(self.shell) + + if self.password is not None: + cmd.append('-p') + cmd.append(self.password) + + if self.createhome: + cmd.append('-m') + else: + cmd.append('-M') + + if self.system: + cmd.append('-r') + + cmd.append(self.name) + return self.execute_command(cmd) + + def remove_user(self): + cmd = ['/usr/sam/lbin/userdel.sam'] + if self.force: + cmd.append('-F') + if self.remove: + cmd.append('-r') + cmd.append(self.name) + return self.execute_command(cmd) + + def modify_user(self): + cmd = ['/usr/sam/lbin/usermod.sam'] + info = self.user_info() + has_append = self._check_usermod_append() + + if self.uid is not None and info[2] != int(self.uid): + cmd.append('-u') + cmd.append(self.uid) + + if self.non_unique: + cmd.append('-o') + + if self.group is not None: + if not self.group_exists(self.group): + self.module.fail_json(msg="Group %s does not exist" % self.group) + ginfo = self.group_info(self.group) + if info[3] != ginfo[2]: + cmd.append('-g') + cmd.append(self.group) + + if self.groups is not None: + current_groups = self.user_group_membership() + groups_need_mod = 
False + groups = [] + + if self.groups == '': + if current_groups and not self.append: + groups_need_mod = True + else: + groups = self.get_groups_set(remove_existing=False) + group_diff = set(current_groups).symmetric_difference(groups) + + if group_diff: + if self.append: + for g in groups: + if g in group_diff: + if has_append: + cmd.append('-a') + groups_need_mod = True + break + else: + groups_need_mod = True + + if groups_need_mod: + if self.append and not has_append: + cmd.append('-A') + cmd.append(','.join(group_diff)) + else: + cmd.append('-G') + cmd.append(','.join(groups)) + + + if self.comment is not None and info[4] != self.comment: + cmd.append('-c') + cmd.append(self.comment) + + if self.home is not None and info[5] != self.home: + cmd.append('-d') + cmd.append(self.home) + if self.move_home: + cmd.append('-m') + + if self.shell is not None and info[6] != self.shell: + cmd.append('-s') + cmd.append(self.shell) + + if self.update_password == 'always' and self.password is not None and info[1] != self.password: + cmd.append('-p') + cmd.append(self.password) + + # skip if no changes to be made + if len(cmd) == 1: + return (None, '', '') + elif self.module.check_mode: + return (0, '', '') + + cmd.append(self.name) + return self.execute_command(cmd) + +# =========================================== + def main(): ssh_defaults = { 'bits': '2048', From d90a6f17d4638e7b987976f32004c93df19074fd Mon Sep 17 00:00:00 2001 From: Scott Miller Date: Fri, 27 Feb 2015 14:30:07 -0500 Subject: [PATCH 204/236] fix typo --- files/ini_file.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/ini_file.py b/files/ini_file.py index 756f2732a84..869146f61f7 100644 --- a/files/ini_file.py +++ b/files/ini_file.py @@ -152,7 +152,7 @@ def do_ini(module, filename, section=None, option=None, value=None, state='prese f = open(filename, 'w') cp.write(f) except: - module.fail_json(msg="Can't creat %s" % filename) + module.fail_json(msg="Can't create %s" % filename) 
return changed From 4a195c64f174ee6d6659388c97a3733d00d978cb Mon Sep 17 00:00:00 2001 From: Feanil Patel Date: Wed, 12 Nov 2014 14:31:53 -0500 Subject: [PATCH 205/236] Compare DNS names case insensitively. --- cloud/amazon/route53.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/route53.py b/cloud/amazon/route53.py index 5e38dbc0584..c499cfa4fdc 100644 --- a/cloud/amazon/route53.py +++ b/cloud/amazon/route53.py @@ -280,7 +280,7 @@ def main(): decoded_name = rset.name.replace(r'\052', '*') decoded_name = decoded_name.replace(r'\100', '@') - if rset.type == type_in and decoded_name == record_in: + if rset.type == type_in and decoded_name.lower() == record_in.lower(): found_record = True record['zone'] = zone_in record['type'] = rset.type From 5e7605475db7e4b2eb838d19a7f3a7e55d5bb8c9 Mon Sep 17 00:00:00 2001 From: Michael Laccetti Date: Mon, 2 Mar 2015 20:12:54 -0500 Subject: [PATCH 206/236] Moving the import to the top of the file and getting rid of the redundancy I sure do wish I knew where the whitespace change was coming from, though. Frustrating. 
--- cloud/amazon/s3.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/cloud/amazon/s3.py b/cloud/amazon/s3.py index f3f6e222c66..1b2921a50bc 100644 --- a/cloud/amazon/s3.py +++ b/cloud/amazon/s3.py @@ -125,6 +125,8 @@ import os import urlparse import hashlib +from boto.s3.connection import OrdinaryCallingFormat + try: import boto from boto.s3.connection import Location @@ -321,7 +323,6 @@ def main(): if is_fakes3(s3_url): try: fakes3 = urlparse.urlparse(s3_url) - from boto.s3.connection import OrdinaryCallingFormat s3 = boto.connect_s3( aws_access_key, aws_secret_key, @@ -339,21 +340,20 @@ def main(): module.fail_json(msg = str(e)) else: try: - from boto.s3.connection import OrdinaryCallingFormat s3 = boto.s3.connect_to_region(location, aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_key, is_secure=True, calling_format=OrdinaryCallingFormat()) except boto.exception.NoAuthHandlerFound, e: module.fail_json(msg = str(e)) - + # If our mode is a GET operation (download), go through the procedure as appropriate ... if mode == 'get': - + # First, we check to see if the bucket exists, we get "bucket" returned. bucketrtn = bucket_check(module, s3, bucket) if bucketrtn is False: module.fail_json(msg="Target bucket cannot be found", failed=True) # Next, we check to see if the key in the bucket exists. If it exists, it also returns key_matches md5sum check. 
- keyrtn = key_check(module, s3, bucket, obj) + keyrtn = key_check(module, s3, bucket, obj) if keyrtn is False: module.fail_json(msg="Target key cannot be found", failed=True) From 94c1b55744eed7b2cf28cb08326f37f6d7f4c0a6 Mon Sep 17 00:00:00 2001 From: Lev Popov Date: Tue, 3 Mar 2015 15:15:45 +0100 Subject: [PATCH 207/236] Allow to spawn instances without external ip in google cloud module --- cloud/google/gce.py | 24 ++++++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/cloud/google/gce.py b/cloud/google/gce.py index 4105baa30f4..e207a4a8ec1 100644 --- a/cloud/google/gce.py +++ b/cloud/google/gce.py @@ -122,6 +122,13 @@ options: required: false default: "false" aliases: [] + external_ip: + version_added: "1.9" + description: + - type of external ip, ephemeral by default + required: false + default: "ephemeral" + aliases: [] requirements: [ "libcloud" ] notes: @@ -230,6 +237,12 @@ def get_instance_info(inst): key=lambda disk_info: disk_info['index'])] else: disk_names = [] + + if len(inst.public_ips) == 0: + public_ip = None + else: + public_ip = inst.public_ips[0] + return({ 'image': not inst.image is None and inst.image.split('/')[-1] or None, 'disks': disk_names, @@ -238,7 +251,7 @@ def get_instance_info(inst): 'name': inst.name, 'network': netname, 'private_ip': inst.private_ips[0], - 'public_ip': inst.public_ips[0], + 'public_ip': public_ip, 'status': ('status' in inst.extra) and inst.extra['status'] or None, 'tags': ('tags' in inst.extra) and inst.extra['tags'] or [], 'zone': ('zone' in inst.extra) and inst.extra['zone'].name or None, @@ -267,6 +280,10 @@ def create_instances(module, gce, instance_names): tags = module.params.get('tags') zone = module.params.get('zone') ip_forward = module.params.get('ip_forward') + external_ip = module.params.get('external_ip') + + if external_ip == "none": + external_ip = None new_instances = [] changed = False @@ -327,7 +344,8 @@ def create_instances(module, gce, instance_names): try: inst 
= gce.create_node(name, lc_machine_type, lc_image, location=lc_zone, ex_network=network, ex_tags=tags, - ex_metadata=metadata, ex_boot_disk=pd, ex_can_ip_forward=ip_forward) + ex_metadata=metadata, ex_boot_disk=pd, ex_can_ip_forward=ip_forward, + external_ip=external_ip) changed = True except ResourceExistsError: inst = gce.ex_get_node(name, lc_zone) @@ -418,6 +436,8 @@ def main(): pem_file = dict(), project_id = dict(), ip_forward = dict(type='bool', default=False), + external_ip = dict(choices=['ephemeral', 'none'], + default='ephemeral'), ) ) From 7f5aea980af1236ac6707f0f08ed64253b221c38 Mon Sep 17 00:00:00 2001 From: Brandon W Maister Date: Tue, 3 Mar 2015 13:03:08 -0500 Subject: [PATCH 208/236] ec2: Apply all security groups at instance-creation The code was picking out the first instance from the security groups specified, even when multiple groups were specified. Now we use all of them. --- cloud/amazon/ec2.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index a48eb534b41..2c96d01851e 100755 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -806,8 +806,7 @@ def create_instances(module, ec2, vpc, override_count=None): if type(group_id) == str: group_id = [group_id] grp_details = ec2.get_all_security_groups(group_ids=group_id) - grp_item = grp_details[0] - group_name = [grp_item.name] + group_name = [grp_item.name for grp_item in grp_details] except boto.exception.NoAuthHandlerFound, e: module.fail_json(msg = str(e)) From 939ecd33d4a6706e7ed3d021043143ebff177f8b Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 3 Mar 2015 11:22:31 -0800 Subject: [PATCH 209/236] In service_enable, the default of changed is True so we have to set it to False explicitly if it is False. 
Fixes #875 --- system/service.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/system/service.py b/system/service.py index c712c43ce9f..9d921472349 100644 --- a/system/service.py +++ b/system/service.py @@ -776,6 +776,8 @@ class LinuxService(Service): self.module.fail_json(msg=err) else: self.module.fail_json(msg=out) % (self.enable_cmd, self.name, action) + else: + self.changed = False return From 559af8d88d15819a55742da46062dec420f892bd Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 3 Mar 2015 11:38:08 -0800 Subject: [PATCH 210/236] Clarify the documented behaviour of insertbefore slightly. --- files/lineinfile.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/files/lineinfile.py b/files/lineinfile.py index a89dcf0b331..2f7154e17be 100644 --- a/files/lineinfile.py +++ b/files/lineinfile.py @@ -97,8 +97,8 @@ options: - Used with C(state=present). If specified, the line will be inserted before the last match of specified regular expression. A value is available; C(BOF) for inserting the line at the beginning of the file. - If specified regular expresion has no matches, C(insertbefore) will be - ignored. May not be used with C(backrefs). + If specified regular expresion has no matches, the line will be + inserted at the end of the file. May not be used with C(backrefs). 
choices: [ 'BOF', '*regex*' ] create: required: false From 9f38f6e6a2f333eda8e99ab7b76ade0dcd9211ff Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 3 Mar 2015 12:07:42 -0800 Subject: [PATCH 211/236] Small code cleanup --- cloud/amazon/ec2.py | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index 2c96d01851e..544f1e8cc84 100755 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -793,17 +793,13 @@ def create_instances(module, ec2, vpc, override_count=None): grp_details = ec2.get_all_security_groups(filters={'vpc_id': vpc_id}) else: grp_details = ec2.get_all_security_groups() - if type(group_name) == list: - group_id = [ str(grp.id) for grp in grp_details if str(grp.name) in group_name ] - elif type(group_name) == str: - for grp in grp_details: - if str(group_name) in str(grp): - group_id = [str(grp.id)] + if isinstance(group_name, basestring): group_name = [group_name] + group_id = [ str(grp.id) for grp in grp_details if str(grp.name) in group_name ] # Now we try to lookup the group id testing if group exists. elif group_id: #wrap the group_id in a list if it's not one already - if type(group_id) == str: + if isinstance(group_id, basestring): group_id = [group_id] grp_details = ec2.get_all_security_groups(group_ids=group_id) group_name = [grp_item.name for grp_item in grp_details] From a1135f803dc6baf2ccb85d6c4a6976f621355670 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 3 Mar 2015 14:23:07 -0800 Subject: [PATCH 212/236] Fix for int port assignment in a playbook failing Ports are integer values but the old code was assuming they were strings. When login_port is put into playbook complex_args as an integer the code would fail. This update should make the argument validating make sure we have an integer and then we can send that value directly to the relevant APIs. 
Fixes #818 --- database/mysql/mysql_db.py | 17 ++++++++++------- database/mysql/mysql_user.py | 4 ++-- 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/database/mysql/mysql_db.py b/database/mysql/mysql_db.py index 7541683401e..c4b7046a0be 100644 --- a/database/mysql/mysql_db.py +++ b/database/mysql/mysql_db.py @@ -129,7 +129,7 @@ def db_dump(module, host, user, password, db_name, target, port, socket=None): if socket is not None: cmd += " --socket=%s" % pipes.quote(socket) else: - cmd += " --host=%s --port=%s" % (pipes.quote(host), pipes.quote(port)) + cmd += " --host=%s --port=%i" % (pipes.quote(host), port) cmd += " %s" % pipes.quote(db_name) if os.path.splitext(target)[-1] == '.gz': cmd = cmd + ' | gzip > ' + pipes.quote(target) @@ -149,7 +149,7 @@ def db_import(module, host, user, password, db_name, target, port, socket=None): if socket is not None: cmd += " --socket=%s" % pipes.quote(socket) else: - cmd += " --host=%s --port=%s" % (pipes.quote(host), pipes.quote(port)) + cmd += " --host=%s --port=%i" % (pipes.quote(host), port) cmd += " -D %s" % pipes.quote(db_name) if os.path.splitext(target)[-1] == '.gz': gunzip_path = module.get_bin_path('gunzip') @@ -266,7 +266,7 @@ def main(): login_user=dict(default=None), login_password=dict(default=None), login_host=dict(default="localhost"), - login_port=dict(default="3306"), + login_port=dict(default=3306, type='int'), login_unix_socket=dict(default=None), name=dict(required=True, aliases=['db']), encoding=dict(default=""), @@ -285,6 +285,9 @@ def main(): state = module.params["state"] target = module.params["target"] socket = module.params["login_unix_socket"] + login_port = module.params["login_port"] + if login_port < 0 or login_port > 65535: + module.fail_json(msg="login_port must be a valid unix port number (0-65535)") # make sure the target path is expanded for ~ and $HOME if target is not None: @@ -322,10 +325,10 @@ def main(): except OSError: module.fail_json(msg="%s, does not exist, unable to 
connect" % socket) db_connection = MySQLdb.connect(host=module.params["login_host"], unix_socket=socket, user=login_user, passwd=login_password, db=connect_to_db) - elif module.params["login_port"] != "3306" and module.params["login_host"] == "localhost": + elif login_port != 3306 and module.params["login_host"] == "localhost": module.fail_json(msg="login_host is required when login_port is defined, login_host cannot be localhost when login_port is defined") else: - db_connection = MySQLdb.connect(host=module.params["login_host"], port=int(module.params["login_port"]), user=login_user, passwd=login_password, db=connect_to_db) + db_connection = MySQLdb.connect(host=module.params["login_host"], port=login_port, user=login_user, passwd=login_password, db=connect_to_db) cursor = db_connection.cursor() except Exception, e: if "Unknown database" in str(e): @@ -344,7 +347,7 @@ def main(): elif state == "dump": rc, stdout, stderr = db_dump(module, login_host, login_user, login_password, db, target, - port=module.params['login_port'], + port=login_port, socket=module.params['login_unix_socket']) if rc != 0: module.fail_json(msg="%s" % stderr) @@ -353,7 +356,7 @@ def main(): elif state == "import": rc, stdout, stderr = db_import(module, login_host, login_user, login_password, db, target, - port=module.params['login_port'], + port=login_port, socket=module.params['login_unix_socket']) if rc != 0: module.fail_json(msg="%s" % stderr) diff --git a/database/mysql/mysql_user.py b/database/mysql/mysql_user.py index 2ac75a67680..5901771f6ad 100644 --- a/database/mysql/mysql_user.py +++ b/database/mysql/mysql_user.py @@ -424,7 +424,7 @@ def connect(module, login_user, login_password): if module.params["login_unix_socket"]: db_connection = MySQLdb.connect(host=module.params["login_host"], unix_socket=module.params["login_unix_socket"], user=login_user, passwd=login_password, db="mysql") else: - db_connection = MySQLdb.connect(host=module.params["login_host"], 
port=int(module.params["login_port"]), user=login_user, passwd=login_password, db="mysql") + db_connection = MySQLdb.connect(host=module.params["login_host"], port=module.params["login_port"], user=login_user, passwd=login_password, db="mysql") return db_connection.cursor() # =========================================== @@ -437,7 +437,7 @@ def main(): login_user=dict(default=None), login_password=dict(default=None), login_host=dict(default="localhost"), - login_port=dict(default="3306"), + login_port=dict(default=3306, type='int'), login_unix_socket=dict(default=None), user=dict(required=True, aliases=['name']), password=dict(default=None), From 74e922449b457b39bf23ac38316e03c21bd96e2c Mon Sep 17 00:00:00 2001 From: Sam Yaple Date: Tue, 3 Mar 2015 23:32:15 -0600 Subject: [PATCH 213/236] Add pid mode support to docker module This allows setting the pid namespace for a container. Currently only the 'host' pid namespace is supported. This requires Docker 1.4.1 and docker-py 1.0.0 --- cloud/docker/docker.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index da4fbc123fb..f79f5a2d5d4 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -209,6 +209,13 @@ options: default: false aliases: [] version_added: "1.8" + pid: + description: + - Set the PID namespace mode for the container (currently only supports 'host'). Requires docker-py >= 1.0.0 and docker >= 1.4.1. + required: false + default: false + aliases: [] + version_added: "1.8" registry: description: - The remote registry URL to use for pulling images. 
@@ -790,6 +797,7 @@ class DockerManager(object): 'privileged': self.module.params.get('privileged'), 'links': self.links, 'network_mode': self.module.params.get('net'), + 'pid_mode': self.module.params.get('pid'), } optionals = {} @@ -872,6 +880,7 @@ def main(): lxc_conf = dict(default=None, type='list'), name = dict(default=None), net = dict(default=None), + pid = dict(default=None), insecure_registry = dict(default=False, type='bool'), ) ) From a15cce2d7f661e540595e1787f790b6478e5d97c Mon Sep 17 00:00:00 2001 From: Lev Popov Date: Wed, 4 Mar 2015 00:49:15 +0100 Subject: [PATCH 214/236] Allow to keep instance boot disk after instance deletion in google cloud module --- cloud/google/gce.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/cloud/google/gce.py b/cloud/google/gce.py index e207a4a8ec1..68203736789 100644 --- a/cloud/google/gce.py +++ b/cloud/google/gce.py @@ -129,6 +129,13 @@ options: required: false default: "ephemeral" aliases: [] + disk_auto_delete: + version_added: "1.9" + description: + - if set boot disk will be removed after instance destruction + required: false + default: "true" + aliases: [] requirements: [ "libcloud" ] notes: @@ -281,6 +288,7 @@ def create_instances(module, gce, instance_names): zone = module.params.get('zone') ip_forward = module.params.get('ip_forward') external_ip = module.params.get('external_ip') + disk_auto_delete = module.params.get('disk_auto_delete') if external_ip == "none": external_ip = None @@ -345,7 +353,7 @@ def create_instances(module, gce, instance_names): inst = gce.create_node(name, lc_machine_type, lc_image, location=lc_zone, ex_network=network, ex_tags=tags, ex_metadata=metadata, ex_boot_disk=pd, ex_can_ip_forward=ip_forward, - external_ip=external_ip) + external_ip=external_ip, ex_disk_auto_delete=disk_auto_delete) changed = True except ResourceExistsError: inst = gce.ex_get_node(name, lc_zone) @@ -438,6 +446,7 @@ def main(): ip_forward = dict(type='bool', default=False), 
external_ip = dict(choices=['ephemeral', 'none'], default='ephemeral'), + disk_auto_delete = dict(type='bool', default=True), ) ) From ee8039ef09bb35142ab2e7b5698248ba2422456d Mon Sep 17 00:00:00 2001 From: Jonathan Mainguy Date: Thu, 19 Feb 2015 16:48:14 -0500 Subject: [PATCH 215/236] Now correctly gzip/bzips file back up in case of import failure Removed gunzip and bunzip2 dependency --- database/mysql/mysql_db.py | 52 +++++++++++++++++++------------------- 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/database/mysql/mysql_db.py b/database/mysql/mysql_db.py index 7541683401e..135dd7cb75d 100644 --- a/database/mysql/mysql_db.py +++ b/database/mysql/mysql_db.py @@ -152,39 +152,39 @@ def db_import(module, host, user, password, db_name, target, port, socket=None): cmd += " --host=%s --port=%s" % (pipes.quote(host), pipes.quote(port)) cmd += " -D %s" % pipes.quote(db_name) if os.path.splitext(target)[-1] == '.gz': - gunzip_path = module.get_bin_path('gunzip') - if gunzip_path: - rc, stdout, stderr = module.run_command('%s %s' % (gunzip_path, target)) - if rc != 0: - return rc, stdout, stderr - cmd += " < %s" % pipes.quote(os.path.splitext(target)[0]) + gzip_path = module.get_bin_path('gzip') + if not gzip_path: + module.fail_json(msg="gzip command not found") + #gzip -d file (uncompress) + rc, stdout, stderr = module.run_command('%s -d %s' % (gzip_path, target)) + if rc != 0: + return rc, stdout, stderr + #Import sql + cmd += " < %s" % pipes.quote(os.path.splitext(target)[0]) + try: rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True) if rc != 0: return rc, stdout, stderr - gzip_path = module.get_bin_path('gzip') - if gzip_path: - rc, stdout, stderr = module.run_command('%s %s' % (gzip_path, os.path.splitext(target)[0])) - else: - module.fail_json(msg="gzip command not found") - else: - module.fail_json(msg="gunzip command not found") + finally: + #gzip file back up + module.run_command('%s %s' % (gzip_path, 
os.path.splitext(target)[0])) elif os.path.splitext(target)[-1] == '.bz2': - bunzip2_path = module.get_bin_path('bunzip2') - if bunzip2_path: - rc, stdout, stderr = module.run_command('%s %s' % (bunzip2_path, target)) - if rc != 0: - return rc, stdout, stderr - cmd += " < %s" % pipes.quote(os.path.splitext(target)[0]) + bzip2_path = module.get_bin_path('bzip2') + if not bzip2_path: + module.fail_json(msg="bzip2 command not found") + #bzip2 -d file (uncompress) + rc, stdout, stderr = module.run_command('%s -d %s' % (bzip2_path, target)) + if rc != 0: + return rc, stdout, stderr + #Import sql + cmd += " < %s" % pipes.quote(os.path.splitext(target)[0]) + try: rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True) if rc != 0: return rc, stdout, stderr - bzip2_path = module.get_bin_path('bzip2') - if bzip2_path: - rc, stdout, stderr = module.run_command('%s %s' % (bzip2_path, os.path.splitext(target)[0])) - else: - module.fail_json(msg="bzip2 command not found") - else: - module.fail_json(msg="bunzip2 command not found") + finally: + #bzip2 file back up + rc, stdout, stderr = module.run_command('%s %s' % (bzip2_path, os.path.splitext(target)[0])) else: cmd += " < %s" % pipes.quote(target) rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True) From 0a91ace6ad1d1f61b7a774087ee882e6871448ce Mon Sep 17 00:00:00 2001 From: Dag Wieers Date: Thu, 5 Mar 2015 11:42:34 +0100 Subject: [PATCH 216/236] Update authorized_key.py Fix a (common) error in the examples. This is something that may go unnoticed during troubleshooting when copy&pasting the example. 
--- system/authorized_key.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/authorized_key.py b/system/authorized_key.py index 38a8b96d83f..be2a442346d 100644 --- a/system/authorized_key.py +++ b/system/authorized_key.py @@ -111,7 +111,7 @@ EXAMPLES = ''' key_options='no-port-forwarding,host="10.0.1.1"' # Set up authorized_keys exclusively with one key -- authorized_keys: user=root key=public_keys/doe-jane state=present +- authorized_key: user=root key=public_keys/doe-jane state=present exclusive=yes ''' From e1e74bdf335904a5e94812db85eb41b279cfc884 Mon Sep 17 00:00:00 2001 From: Sam Yaple Date: Fri, 6 Mar 2015 03:13:32 -0600 Subject: [PATCH 217/236] Updated version and fixed default --- cloud/docker/docker.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index f79f5a2d5d4..ef71df017ab 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -213,9 +213,9 @@ options: description: - Set the PID namespace mode for the container (currently only supports 'host'). Requires docker-py >= 1.0.0 and docker >= 1.4.1. required: false - default: false + default: None aliases: [] - version_added: "1.8" + version_added: "1.9" registry: description: - The remote registry URL to use for pulling images. 
From f16d8d9afbbf38bc4b9b38b86f477f07b764c93e Mon Sep 17 00:00:00 2001 From: Bret Martin Date: Fri, 6 Mar 2015 14:59:27 -0500 Subject: [PATCH 218/236] Allow vpc_subnet_id to remain unspecified (see bc0c169) --- cloud/amazon/ec2.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index 544f1e8cc84..b59d4bda669 100755 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -785,6 +785,8 @@ def create_instances(module, ec2, vpc, override_count=None): if vpc_subnet_id: vpc_id = vpc.get_all_subnets(subnet_ids=[vpc_subnet_id])[0].vpc_id + else: + vpc_id = None try: # Here we try to lookup the group id from the security group name - if group is set. From fedbea682eb8710b9eda1eb4562a57ee2d161cc5 Mon Sep 17 00:00:00 2001 From: Ash Wilson Date: Fri, 12 Dec 2014 11:01:08 -0500 Subject: [PATCH 219/236] Another documentation commit. --- cloud/docker/docker.py | 401 +++++++++++++++++++---------------------- 1 file changed, 181 insertions(+), 220 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index da4fbc123fb..db2c9e585a3 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -27,335 +27,296 @@ module: docker version_added: "1.4" short_description: manage docker containers description: - - Manage the life cycle of docker containers. +- Manage the life cycle of docker containers. options: count: description: - - Set number of containers to run - required: False + - Number of matching containers that should be in the desired state. default: 1 - aliases: [] image: description: - - Set container image to use + - Container image used to match and launch containers. required: true - default: null - aliases: [] + pull: + description: + - Control when container images are updated from the C(docker_url) registry. + - If "missing," images will be pulled only when missing from the host; if + - '"always," the registry will be checked for a newer version of the image' + - each time the task executes. 
+ default: missing + choices: [ "missing", "always" ] + version_added: "1.9" command: description: - - Set command to run in a container on startup - required: false + - Command used to match and launch containers. default: null - aliases: [] name: description: - - Set name for container (used to find single container or to provide links) - required: false + - Name used to match and uniquely name launched containers. Explicit names + - are used to uniquely identify a single container or to link among + - containers. Mutually exclusive with a "count" other than "1". default: null - aliases: [] version_added: "1.5" ports: description: - - Set private to public port mapping specification using docker CLI-style syntax [([:[host_port]])|():][/udp] - required: false + - List containing private to public port mapping specification. Use docker + - 'CLI-style syntax: C(8000), C(9000:8000), or C(0.0.0.0:9000:8000) where' + - 8000 is a container port, 9000 is a host port, and 0.0.0.0 is a host + - interface. default: null - aliases: [] version_added: "1.5" expose: description: - - Set container ports to expose for port mappings or links. (If the port is already exposed using EXPOSE in a Dockerfile, you don't need to expose it again.) - required: false + - List of additional container ports to expose for port mappings or links. + - If the port is already exposed using EXPOSE in a Dockerfile, you don't + - need to expose it again. default: null - aliases: [] version_added: "1.5" publish_all_ports: description: - - Publish all exposed ports to the host interfaces - required: false + - Publish all exposed ports to the host interfaces. default: false - aliases: [] version_added: "1.5" volumes: description: - - Set volume(s) to mount on the container separated with a comma (,) and in the format "source:dest[:rights]" - required: false + - List of volumes to mount within the container using docker CLI-style + - 'syntax: C(/host:/container[:mode]) where "mode" may be "rw" or "ro".' 
default: null - aliases: [] volumes_from: description: - - Set shared volume(s) from another container - required: false + - List of names of containers to mount volumes from. default: null - aliases: [] links: description: - - Link container(s) to other container(s) (e.g. links=redis,postgresql:db) - required: false + - List of other containers to link within this container with an optional + - 'alias. Use docker CLI-style syntax: C(redis:myredis).' default: null - aliases: [] version_added: "1.5" memory_limit: description: - - Set RAM allocated to container. It will be passed as a number of bytes. For example 1048576 = 1Gb - required: false - default: null - aliases: [] - default: 262144 + - RAM allocated to the container as a number of bytes or as a human-readable + - string like "512MB". Leave as "0" to specify no limit. + default: 0 docker_url: description: - - URL of docker host to issue commands to - required: false - default: unix://var/run/docker.sock - aliases: [] + - URL of the host running the docker daemon. This will default to the env + - var DOCKER_HOST if unspecified. + default: ${DOCKER_HOST} or unix://var/run/docker.sock + docker_tls_cert: + description: + - Path to a PEM-encoded client certificate to secure the Docker connection. + default: ${DOCKER_CERT_PATH}/cert.pem + docker_tls_key: + description: + - Path to a PEM-encoded client key to secure the Docker connection. + default: ${DOCKER_CERT_PATH}/key.pem + docker_tls_cacert: + description: + - Path to a PEM-encoded certificate authority to secure the Docker connection. + default: ${DOCKER_CERT_PATH}/ca.pem docker_api_version: description: - - Remote API version to use. This defaults to the current default as specified by docker-py. - required: false + - Remote API version to use. This defaults to the current default as + - specified by docker-py. 
default: docker-py default remote API version - aliases: [] version_added: "1.8" username: description: - - Set remote API username - required: false + - Remote API username. default: null - aliases: [] password: description: - - Set remote API password - required: false + - Remote API password. default: null - aliases: [] email: description: - - Set remote API email - required: false + - Remote API email. default: null - aliases: [] hostname: description: - - Set container hostname - required: false + - Container hostname. default: null - aliases: [] domainname: description: - - Set container domain name - required: false + - Container domain name. default: null - aliases: [] env: description: - - Set environment variables (e.g. env="PASSWORD=sEcRe7,WORKERS=4") - required: false + - Pass a dict of environment variables to the container. default: null - aliases: [] dns: description: - - Set custom DNS servers for the container + - List of custom DNS servers for the container. required: false default: null - aliases: [] detach: description: - - Enable detached mode on start up, leaves container running in background - required: false + - Enable detached mode to leave the container running in background. default: true - aliases: [] state: description: - - Set the state of the container + - Assert the container's desired state. "present" only asserts that the + - matching containers exist. "started" asserts that the matching containers + - both exist and are running, but takes no action if any configuration has + - changed. "reloaded" asserts that all matching containers are running and + - restarts any that have any images or configuration out of date. "restarted" + - unconditionally restarts (or starts) the matching containers. "stopped" and + - '"killed" stop and kill all matching containers. "absent" stops and then' + - removes any matching containers. 
required: false default: present - choices: [ "present", "running", "stopped", "absent", "killed", "restarted" ] - aliases: [] + choices: + - present + - started + - reloaded + - restarted + - killed + - absent privileged: description: - - Set whether the container should run in privileged mode - required: false + - Whether the container should run in privileged mode or not. default: false - aliases: [] lxc_conf: description: - - LXC config parameters, e.g. lxc.aa_profile:unconfined - required: false - default: - aliases: [] - name: - description: - - Set the name of the container (cannot use with count) - required: false + - LXC configuration parameters, such as C(lxc.aa_profile:unconfined). default: null - aliases: [] - version_added: "1.5" stdin_open: description: - - Keep stdin open - required: false + - Keep stdin open after a container is launched. default: false - aliases: [] version_added: "1.6" tty: description: - - Allocate a pseudo-tty - required: false + - Allocate a pseudo-tty within the container. default: false - aliases: [] version_added: "1.6" net: description: - - Set Network mode for the container (bridge, none, container:, host). Requires docker >= 0.11. - required: false + - 'Network mode for the launched container: bridge, none, container:' + - or host. Requires docker >= 0.11. default: false - aliases: [] version_added: "1.8" registry: description: - - The remote registry URL to use for pulling images. - required: false - default: '' + - Remote registry URL to pull images from. + default: DockerHub aliases: [] version_added: "1.8" restart_policy: description: - - Set the container restart policy - required: false - default: false - aliases: [] + - Container restart policy. + choices: ["no", "on-failure", "always"] + default: null version_added: "1.9" restart_policy_retry: description: - - Set the retry limit for container restart policy - required: false - default: false - aliases: [] + - Maximum number of times to restart a container. 
Leave as "0" for unlimited + - retries. + default: 0 version_added: "1.9" insecure_registry: description: - - Use insecure private registry by HTTP instead of HTTPS (needed for docker-py >= 0.5.0). - required: false + - Use insecure private registry by HTTP instead of HTTPS. Needed for + - docker-py >= 0.5.0. default: false - aliases: [] version_added: "1.9" -author: Cove Schneider, Joshua Conner, Pavel Antonov +author: Cove Schneider, Joshua Conner, Pavel Antonov, Ash Wilson requirements: [ "docker-py >= 0.3.0", "docker >= 0.10.0" ] ''' EXAMPLES = ''' -Start one docker container running tomcat in each host of the web group and bind tomcat's listening port to 8080 -on the host: - -- hosts: web - sudo: yes - tasks: - - name: run tomcat servers - docker: image=centos command="service tomcat6 start" ports=8080 - -The tomcat server's port is NAT'ed to a dynamic port on the host, but you can determine which port the server was -mapped to using docker_containers: - -- hosts: web - sudo: yes - tasks: - - name: run tomcat servers - docker: image=centos command="service tomcat6 start" ports=8080 count=5 - - name: Display IP address and port mappings for containers - debug: msg={{inventory_hostname}}:{{item['HostConfig']['PortBindings']['8080/tcp'][0]['HostPort']}} - with_items: docker_containers - -Just as in the previous example, but iterates over the list of docker containers with a sequence: - -- hosts: web - sudo: yes - vars: - start_containers_count: 5 - tasks: - - name: run tomcat servers - docker: image=centos command="service tomcat6 start" ports=8080 count={{start_containers_count}} - - name: Display IP address and port mappings for containers - debug: msg="{{inventory_hostname}}:{{docker_containers[{{item}}]['HostConfig']['PortBindings']['8080/tcp'][0]['HostPort']}}" - with_sequence: start=0 end={{start_containers_count - 1}} - -Stop, remove all of the running tomcat containers and list the exit code from the stopped containers: - -- hosts: web - sudo: yes - 
tasks: - - name: stop tomcat servers - docker: image=centos command="service tomcat6 start" state=absent - - name: Display return codes from stopped containers - debug: msg="Returned {{inventory_hostname}}:{{item}}" - with_items: docker_containers - -Create a named container: - -- hosts: web - sudo: yes - tasks: - - name: run tomcat server - docker: image=centos name=tomcat command="service tomcat6 start" ports=8080 - -Create multiple named containers: - -- hosts: web - sudo: yes - tasks: - - name: run tomcat servers - docker: image=centos name={{item}} command="service tomcat6 start" ports=8080 - with_items: - - crookshank - - snowbell - - heathcliff - - felix - - sylvester - -Create containers named in a sequence: - -- hosts: web - sudo: yes - tasks: - - name: run tomcat servers - docker: image=centos name={{item}} command="service tomcat6 start" ports=8080 - with_sequence: start=1 end=5 format=tomcat_%d.example.com - -Create two linked containers: - -- hosts: web - sudo: yes - tasks: - - name: ensure redis container is running - docker: image=crosbymichael/redis name=redis - - - name: ensure redis_ambassador container is running - docker: image=svendowideit/ambassador ports=6379:6379 links=redis:redis name=redis_ambassador_ansible - -Create containers with options specified as key-value pairs and lists: - -- hosts: web - sudo: yes - tasks: - - docker: - image: namespace/image_name - links: - - postgresql:db - - redis:redis - - -Create containers with options specified as strings and lists as comma-separated strings: - -- hosts: web - sudo: yes - tasks: - docker: image=namespace/image_name links=postgresql:db,redis:redis - -Create a container with no networking: - -- hosts: web - sudo: yes - tasks: - docker: image=namespace/image_name net=none - +# Containers are matched either by name (if provided) or by an exact match of +# the image they were launched with and the command they're running. 
The module +# can accept either a name to target a container uniquely, or a count to operate +# on multiple containers at once when it makes sense to do so. + +# Ensure that a data container with the name "mydata" exists. If no container +# by this name exists, it will be created, but not started. + +- name: data container + docker: + name: mydata + image: busybox + state: present + volumes: + - /data + +# Ensure that a Redis server is running, using the volume from the data +# container. Expose the default Redis port. + +- name: redis container + docker: + name: myredis + image: redis + command: redis-server --appendonly yes + state: started + expose: + - 6379 + volumes_from: + - mydata + +# Ensure that a container of your application server is running. This will: +# - pull the latest version of your application image from DockerHub. +# - ensure that a container is running with the specified name and exact image. +# If any configuration options have changed, the existing container will be +# stopped and removed, and a new one will be launched in its place. +# - link this container to the existing redis container launched above with +# an alias. +# - bind TCP port 9000 within the container to port 8080 on all interfaces +# on the host. +# - bind UDP port 9001 within the container to port 8081 on the host, only +# listening on localhost. +# - set the environment variable SECRET_KEY to "ssssh". + +- name: application container + docker: + name: myapplication + image: someuser/appimage + state: reloaded + pull: always + links: + - "myredis:aliasedredis" + ports: + - "8080:9000" + - "127.0.0.1:8081:9001/udp" + env: + SECRET_KEY: ssssh + +# Ensure that exactly five containers of another server are running with this +# exact image and command. If fewer than five are running, more will be launched; +# if more are running, the excess will be stopped. 
+ +- name: load-balanced containers + docker: + state: reloaded + count: 5 + image: someuser/anotherappimage + command: sleep 1d + +# Unconditionally restart a service container. This may be useful within a +# handler, for example. + +- name: application service + docker: + name: myservice + image: someuser/serviceimage + state: restarted + +# Stop all containers running the specified image. + +- name: obsolete container + docker: + image: someuser/oldandbusted + state: stopped + +# Stop and remove a container with the specified name. + +- name: obsolete container + docker: + name: ohno + image: someuser/oldandbusted + state: absent ''' HAS_DOCKER_PY = True @@ -494,7 +455,7 @@ class DockerManager(object): if len(parts) == 2: self.volumes[parts[1]] = {} self.binds[parts[0]] = parts[1] - # with bind mode + # with bind mode elif len(parts) == 3: if parts[2] not in ['ro', 'rw']: self.module.fail_json(msg='bind mode needs to either be "ro" or "rw"') From 7490223940f321282ddeb5c8c015ff7dc0eb0bef Mon Sep 17 00:00:00 2001 From: Ash Wilson Date: Fri, 12 Dec 2014 11:14:24 -0500 Subject: [PATCH 220/236] Pull newer image versions when requested. 
--- cloud/docker/docker.py | 88 +++++++++++++++++++++++++++++++----------- 1 file changed, 65 insertions(+), 23 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index db2c9e585a3..0cf7296f370 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -322,6 +322,8 @@ EXAMPLES = ''' HAS_DOCKER_PY = True import sys +import json +import re from urlparse import urlparse try: import docker.client @@ -631,6 +633,18 @@ class DockerManager(object): return False + def get_inspect_image(self): + image, tag = get_split_image_tag(self.module.params.get('image')) + if tag is None: + tag = 'latest' + resource = '%s:%s' % (image, tag) + + matching_image = None + for image in self.client.images(name=image): + if resource in image.get('RepoTags', []): + matching_image = image + return matching_image + def get_inspect_containers(self, containers): inspect = [] for i in containers: @@ -683,6 +697,41 @@ class DockerManager(object): return running + def pull_image(self): + extra_params = {} + if self.module.params.get('insecure_registry'): + if self.ensure_capability('insecure_registry', fail=False): + extra_params['insecure_registry'] = self.module.params.get('insecure_registry') + + resource = self.module.params.get('image') + image, tag = get_split_image_tag(resource) + if self.module.params.get('username'): + try: + self.client.login( + self.module.params.get('username'), + password=self.module.params.get('password'), + email=self.module.params.get('email'), + registry=self.module.params.get('registry') + ) + except: + self.module.fail_json(msg="failed to login to the remote registry, check your username/password.") + try: + last = None + for line in self.client.pull(image, tag=tag, stream=True, **extra_params): + last = line + status = json.loads(last).get('status', '') + if status.startswith('Status: Image is up to date for'): + # Image is already up to date. Don't increment the counter. 
+ pass + elif status.startswith('Status: Downloaded newer image for'): + # Image was updated. Increment the pull counter. + self.increment_counter('pull') + else: + # Unrecognized status string. + self.module.fail_json(msg="Unrecognized status from pull", status=status) + except: + self.module.fail_json(msg="failed to pull the specified image: %s" % resource) + def create_containers(self, count=1): params = {'image': self.module.params.get('image'), 'command': self.module.params.get('command'), @@ -704,11 +753,6 @@ class DockerManager(object): if params['volumes_from'] is not None: self.ensure_capability('volumes_from') - extra_params = {} - if self.module.params.get('insecure_registry'): - if self.ensure_capability('insecure_registry', fail=False): - extra_params['insecure_registry'] = self.module.params.get('insecure_registry') - def do_create(count, params): results = [] for _ in range(count): @@ -721,23 +765,7 @@ class DockerManager(object): try: containers = do_create(count, params) except: - resource = self.module.params.get('image') - image, tag = get_split_image_tag(resource) - if self.module.params.get('username'): - try: - self.client.login( - self.module.params.get('username'), - password=self.module.params.get('password'), - email=self.module.params.get('email'), - registry=self.module.params.get('registry') - ) - except: - self.module.fail_json(msg="failed to login to the remote registry, check your username/password.") - try: - self.client.pull(image, tag=tag, **extra_params) - except: - self.module.fail_json(msg="failed to pull the specified image: %s" % resource) - self.increment_counter('pull') + self.pull_image() containers = do_create(count, params) return containers @@ -803,6 +831,7 @@ def main(): argument_spec = dict( count = dict(default=1), image = dict(required=True), + pull = dict(required=False, default='missing', choices=['missing', 'always']), command = dict(required=False, default=None), expose = dict(required=False, default=None, 
type='list'), ports = dict(required=False, default=None, type='list'), @@ -845,12 +874,25 @@ def main(): count = int(module.params.get('count')) name = module.params.get('name') image = module.params.get('image') + pull = module.params.get('pull') if count < 0: module.fail_json(msg="Count must be greater than zero") if count > 1 and name: module.fail_json(msg="Count and name must not be used together") + # Explicitly pull new container images, if requested. + # Do this before noticing running and deployed containers so that the image names will differ + # if a newer image has been pulled. + if pull == "always": + manager.pull_image() + + # Find the ID of the requested image and tag, if available. + image_id = None + inspected_image = manager.get_inspect_image() + if inspected_image: + image_id = inspected_image.get('Id') + running_containers = manager.get_running_containers() running_count = len(running_containers) delta = count - running_count @@ -873,7 +915,7 @@ def main(): # the named container is running, but with a # different image or tag, so we stop it first - if existing_container and existing_container.get('Config', dict()).get('Image') != image: + if existing_container and (image_id is None or existing_container.get('Image') != image_id): manager.stop_containers([existing_container]) manager.remove_containers([existing_container]) running_containers = manager.get_running_containers() From 80aca4b9369073d7b58ff168cf4eef8bb126c2d0 Mon Sep 17 00:00:00 2001 From: Ash Wilson Date: Tue, 17 Feb 2015 16:27:00 -0500 Subject: [PATCH 221/236] Rework docker module states. Organize each state into a distinct function for readability and composability. Rework `present` to create but not start containers. Add a `restarted` state to unconditionally restart a container and a `reloaded` state to restart a container if and only if its configuration is incorrect. Store our most recent knowledge about container states in a ContainerSet object. 
Improve the value registered by this task to include not only the inspect data from any changed containers, but also action counters in their native form, a summary message for all actions taken, and a `reload_reasons` key to store a human-readable diagnostic to determine why each container was reloaded. --- cloud/docker/docker.py | 630 +++++++++++++++++++++++++++++++++-------- 1 file changed, 507 insertions(+), 123 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 0cf7296f370..8efa9a448bb 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -164,12 +164,13 @@ options: - '"killed" stop and kill all matching containers. "absent" stops and then' - removes any matching containers. required: false - default: present + default: started choices: - present - started - reloaded - restarted + - stopped - killed - absent privileged: @@ -324,6 +325,8 @@ HAS_DOCKER_PY = True import sys import json import re +import os +import shlex from urlparse import urlparse try: import docker.client @@ -356,9 +359,11 @@ def _human_to_bytes(number): print "failed=True msg='Could not convert %s to integer'" % (number) sys.exit(1) + def _ansible_facts(container_list): return {"docker_containers": container_list} + def _docker_id_quirk(inspect): # XXX: some quirk in docker if 'ID' in inspect: @@ -385,6 +390,13 @@ def get_split_image_tag(image): return resource, tag + +def is_running(container): + '''Return True if an inspected container is in a state we consider "running."''' + + return container['State']['Running'] == True and not container['State'].get('Ghost', False) + + def get_docker_py_versioninfo(): if hasattr(docker, '__version__'): # a '__version__' attribute was added to the module but not until @@ -414,6 +426,7 @@ def get_docker_py_versioninfo(): return tuple(version) + def check_dependencies(module): """ Ensure `docker-py` >= 0.3.0 is installed, and call module.fail_json with a @@ -429,8 +442,12 @@ def check_dependencies(module): class 
DockerManager(object): - counters = {'created':0, 'started':0, 'stopped':0, 'killed':0, 'removed':0, 'restarted':0, 'pull':0} + counters = dict( + created=0, started=0, stopped=0, killed=0, removed=0, restarted=0, pulled=0 + ) + reload_reasons = [] _capabilities = set() + # Map optional parameters to minimum (docker-py version, server APIVersion) # docker-py version is a tuple of ints because we have to compare them # server APIVersion is passed to a docker-py function that takes strings @@ -553,7 +570,6 @@ class DockerManager(object): return processed_links - def get_exposed_ports(self, expose_list): """ Parse the ports and protocols (TCP/UDP) to expose in the docker-py `create_container` call from the docker CLI-style syntax. @@ -572,7 +588,6 @@ class DockerManager(object): else: return None - def get_port_bindings(self, ports): """ Parse the `ports` string into a port bindings dict for the `start_container` call. @@ -615,6 +630,37 @@ class DockerManager(object): return binds + def get_summary_message(self): + ''' + Generate a message that briefly describes the actions taken by this + task, in English. + ''' + + parts = [] + for k, v in self.counters.iteritems(): + if v == 0: + continue + + if v == 1: + plural = "" + else: + plural = "s" + parts.append("%s %d container%s" % (k, v, plural)) + + if parts: + return ", ".join(parts) + "." + else: + return "No action taken." + + def get_reload_reason_message(self): + ''' + Generate a message describing why any reloaded containers were reloaded. + ''' + + if self.reload_reasons: + return ", ".join(self.reload_reasons) + else: + return None def get_summary_counters_msg(self): msg = "" @@ -654,9 +700,281 @@ class DockerManager(object): return inspect + def get_differing_containers(self): + """ + Inspect all matching, running containers, and return those that were + started with parameters that differ from the ones that are provided + during this module run. 
A list containing the differing + containers will be returned, and a short string describing the specific + difference encountered in each container will be appended to + reload_reasons. + + This generates the set of containers that need to be stopped and + started with new parameters with state=reloaded. + """ + + running = self.get_running_containers() + current = self.get_inspect_containers(running) + + image = self.get_inspect_image() + if image is None: + # The image isn't present. Assume that we're about to pull a new + # tag and *everything* will be restarted. + # + # This will give false positives if you untag an image on the host + # and there's nothing more to pull. + return current + + differing = [] + + for container in current: + + # IMAGE + # Compare the image by ID rather than name, so that containers + # will be restarted when new versions of an existing image are + # pulled. + if container['Image'] != image['Id']: + self.reload_reasons.append('image ({} => {})'.format(container['Image'], image['Id'])) + differing.append(container) + continue + + # COMMAND + + expected_command = self.module.params.get('command') + if expected_command: + expected_command = shlex.split(expected_command) + actual_command = container["Config"]["Cmd"] + + if actual_command != expected_command: + self.reload_reasons.append('command ({} => {})'.format(actual_command, expected_command)) + differing.append(container) + continue + + # EXPOSED PORTS + # Note that ports that are bound at container run are also exposed + # implicitly. + expected_exposed_ports = set() + for p in (self.exposed_ports or []): + expected_exposed_ports.add("/".join(p)) + + actually_exposed_ports = set((container["Config"]["ExposedPorts"] or {}).keys()) + + if actually_exposed_ports != expected_exposed_ports: + self.reload_reasons.append('exposed_ports ({} => {})'.format(actually_exposed_ports, expected_exposed_ports)) + differing.append(container) + continue + + # VOLUMES + # not including bind modes. 
+ + expected_volume_keys = set() + if self.volumes: + for key, config in self.volumes.iteritems(): + if not config and key not in self.binds: + expected_volume_keys.add(key) + actual_volume_keys = set((container['Config']['Volumes'] or {}).keys()) + + if actual_volume_keys != expected_volume_keys: + self.reload_reasons.append('volumes ({} => {})'.format(actual_volume_keys, expected_volume_keys)) + differing.append(container) + continue + + # MEM_LIMIT + + expected_mem = _human_to_bytes(self.module.params.get('memory_limit')) + actual_mem = container['Config']['Memory'] + + if expected_mem and actual_mem != expected_mem: + self.reload_reasons.append('memory ({} => {})'.format(actual_mem, expected_mem)) + differing.append(container) + continue + + # ENVIRONMENT + # actual_env is likely to include environment variables injected by + # the Dockerfile. + + expected_env = set() + if self.env: + for name, value in self.env.iteritems(): + expected_env.add("{}={}".format(name, value)) + actual_env = set(container['Config']['Env'] or []) + + if not actual_env.issuperset(expected_env): + # Don't include the environment difference in the output. + self.reload_reasons.append('environment') + differing.append(container) + continue + + # HOSTNAME + + expected_hostname = self.module.params.get('hostname') + actual_hostname = container['Config']['Hostname'] + if expected_hostname and actual_hostname != expected_hostname: + self.reload_reasons.append('hostname ({} => {})'.format(actual_hostname, expected_hostname)) + differing.append(container) + continue + + # DOMAINNAME + + expected_domainname = self.module.params.get('domainname') + actual_domainname = container['Config']['Domainname'] + if expected_domainname and actual_domainname != expected_domainname: + self.reload_reasons.append('domainname ({} => {})'.format(actual_domainname, expected_domainname)) + differing.append(container) + continue + + # DETACH + + # We don't have to check for undetached containers. 
If it wasn't + # detached, it would have stopped before the playbook continued! + + # NAME + + # We also don't have to check name, because this is one of the + # criteria that's used to determine which container(s) match in + # the first place. + + # STDIN_OPEN + + expected_stdin_open = self.module.params.get('stdin_open') + actual_stdin_open = container['Config']['AttachStdin'] + if actual_stdin_open != expected_stdin_open: + self.reload_reasons.append('stdin_open ({} => {})'.format(actual_stdin_open, expected_stdin_open)) + differing.append(container) + continue + + # TTY + + expected_tty = self.module.params.get('tty') + actual_tty = container['Config']['Tty'] + if actual_tty != expected_tty: + self.reload_reasons.append('tty ({} => {})'.format(actual_tty, expected_tty)) + differing.append(container) + continue + + # -- "start" call differences -- + + # LXC_CONF + + if self.lxc_conf: + expected_lxc = set(self.lxc_conf) + actual_lxc = set(container['HostConfig']['LxcConf'] or []) + if actual_lxc != expected_lxc: + self.reload_reasons.append('lxc_conf ({} => {})'.format(actual_lxc, expected_lxc)) + differing.append(container) + continue + + # BINDS + + expected_binds = set() + if self.binds: + for host_path, config in self.binds.iteritems(): + if isinstance(config, dict): + container_path = config['bind'] + if config['ro']: + mode = 'ro' + else: + mode = 'rw' + else: + container_path = config + mode = 'rw' + expected_binds.add("{}:{}:{}".format(host_path, container_path, mode)) + + actual_binds = set() + for bind in (container['HostConfig']['Binds'] or []): + if len(bind.split(':')) == 2: + actual_binds.add(bind + ":rw") + else: + actual_binds.add(bind) + + if actual_binds != expected_binds: + self.reload_reasons.append('binds ({} => {})'.format(actual_binds, expected_binds)) + differing.append(container) + continue + + # PORT BINDINGS + + expected_bound_ports = {} + if self.port_bindings: + for container_port, config in self.port_bindings.iteritems(): + if 
isinstance(container_port, int): + container_port = "{}/tcp".format(container_port) + bind = {} + if len(config) == 1: + bind['HostIp'] = "0.0.0.0" + bind['HostPort'] = "" + else: + bind['HostIp'] = config[0] + bind['HostPort'] = str(config[1]) + + expected_bound_ports[container_port] = [bind] + + actual_bound_ports = container['HostConfig']['PortBindings'] or {} + + if actual_bound_ports != expected_bound_ports: + self.reload_reasons.append('port bindings ({} => {})'.format(actual_bound_ports, expected_bound_ports)) + differing.append(container) + continue + + # PUBLISHING ALL PORTS + + # What we really care about is the set of ports that is actually + # published. That should be caught above. + + # PRIVILEGED + + expected_privileged = self.module.params.get('privileged') + actual_privileged = container['HostConfig']['Privileged'] + if actual_privileged != expected_privileged: + self.reload_reasons.append('privileged ({} => {})'.format(actual_privileged, expected_privileged)) + differing.append(container) + continue + + # LINKS + + expected_links = set() + for link, alias in (self.links or {}).iteritems(): + expected_links.add("/{}:/running/{}".format(link, alias)) + + actual_links = set(container['HostConfig']['Links'] or []) + if actual_links != expected_links: + self.reload_reasons.append('links ({} => {})'.format(actual_links, expected_links)) + differing.append(container) + continue + + # NETWORK MODE + + expected_netmode = self.module.params.get('net') or '' + actual_netmode = container['HostConfig']['NetworkMode'] + if actual_netmode != expected_netmode: + self.reload_reasons.append('net ({} => {})'.format(actual_netmode, expected_netmode)) + differing.append(container) + continue + + # DNS + + expected_dns = set(self.module.params.get('dns') or []) + actual_dns = set(container['HostConfig']['Dns'] or []) + if actual_dns != expected_dns: + self.reload_reasons.append('dns ({} => {})'.format(actual_dns, expected_dns)) + differing.append(container) + continue 
+ + # VOLUMES_FROM + + expected_volumes_from = set(self.module.params.get('volumes_from') or []) + actual_volumes_from = set(container['HostConfig']['VolumesFrom'] or []) + if actual_volumes_from != expected_volumes_from: + self.reload_reasons.append('volumes_from ({} => {})'.format(actual_volumes_from, expected_volumes_from)) + differing.append(container) + + return differing + def get_deployed_containers(self): - """determine which images/commands are running already""" - image = self.module.params.get('image') + """ + Return any matching containers that are already present. + """ + command = self.module.params.get('command') if command: command = command.strip() @@ -665,37 +983,41 @@ class DockerManager(object): name = '/' + name deployed = [] - # if we weren't given a tag with the image, we need to only compare on the image name, as that - # docker will give us back the full image name including a tag in the container list if one exists. - image, tag = get_split_image_tag(image) + # "images" will be a collection of equivalent "name:tag" image names + # that map to the same Docker image. 
+ inspected = self.get_inspect_image() + if inspected: + images = inspected.get('RepoTags', []) + else: + image, tag = get_split_image_tag(self.module.params.get('image')) + images = [':'.join([image, tag])] for i in self.client.containers(all=True): - running_image, running_tag = get_split_image_tag(i['Image']) + running_image = i['Image'] running_command = i['Command'].strip() + match = False + + if name: + matches = name in i.get('Names', []) + else: + image_matches = running_image in images - name_matches = False - if i["Names"]: - name_matches = (name and name in i['Names']) - image_matches = (running_image == image) - tag_matches = (not tag or running_tag == tag) - # if a container has an entrypoint, `command` will actually equal - # '{} {}'.format(entrypoint, command) - command_matches = (not command or running_command.endswith(command)) + # if a container has an entrypoint, `command` will actually equal + # '{} {}'.format(entrypoint, command) + command_matches = (not command or running_command.endswith(command)) - if name_matches or (name is None and image_matches and tag_matches and command_matches): + matches = image_matches and command_matches + + if matches: details = self.client.inspect_container(i['Id']) details = _docker_id_quirk(details) + deployed.append(details) return deployed def get_running_containers(self): - running = [] - for i in self.get_deployed_containers(): - if i['State']['Running'] == True and i['State'].get('Ghost', False) == False: - running.append(i) - - return running + return [c for c in self.get_deployed_containers() if is_running(c)] def pull_image(self): extra_params = {} @@ -713,8 +1035,8 @@ class DockerManager(object): email=self.module.params.get('email'), registry=self.module.params.get('registry') ) - except: - self.module.fail_json(msg="failed to login to the remote registry, check your username/password.") + except e: + self.module.fail_json(msg="failed to login to the remote registry, check your username/password.", 
error=repr(e)) try: last = None for line in self.client.pull(image, tag=tag, stream=True, **extra_params): @@ -725,12 +1047,12 @@ class DockerManager(object): pass elif status.startswith('Status: Downloaded newer image for'): # Image was updated. Increment the pull counter. - self.increment_counter('pull') + self.increment_counter('pulled') else: # Unrecognized status string. - self.module.fail_json(msg="Unrecognized status from pull", status=status) - except: - self.module.fail_json(msg="failed to pull the specified image: %s" % resource) + self.module.fail_json(msg="Unrecognized status from pull.", status=status) + except e: + self.module.fail_json(msg="Failed to pull the specified image: %s" % resource, error=repr(e)) def create_containers(self, count=1): params = {'image': self.module.params.get('image'), @@ -776,7 +1098,7 @@ class DockerManager(object): 'binds': self.binds, 'port_bindings': self.port_bindings, 'publish_all_ports': self.module.params.get('publish_all_ports'), - 'privileged': self.module.params.get('privileged'), + 'privileged': self.module.params.get('privileged'), 'links': self.links, 'network_mode': self.module.params.get('net'), } @@ -826,6 +1148,129 @@ class DockerManager(object): self.increment_counter('restarted') +class ContainerSet: + + def __init__(self, manager): + self.manager = manager + self.running = [] + self.deployed = [] + self.changed = [] + + def refresh(self): + ''' + Update our view of the matching containers from the Docker daemon. + ''' + + + self.deployed = self.manager.get_deployed_containers() + self.running = [c for c in self.deployed if is_running(c)] + + def notice_changed(self, containers): + ''' + Record a collection of containers as "changed". 
+ ''' + + self.changed.extend(containers) + + +def present(manager, containers, count, name): + '''Ensure that exactly `count` matching containers exist in any state.''' + + containers.refresh() + delta = count - len(containers.deployed) + + if delta > 0: + containers.notice_changed(manager.create_containers(delta)) + + if delta < 0: + # If both running and stopped containers exist, remove + # stopped containers first. + containers.deployed.sort(lambda cx, cy: cmp(is_running(cx), is_running(cy))) + + to_stop = [] + to_remove = [] + for c in containers.deployed[0:-delta]: + if is_running(c): + to_stop.append(c) + to_remove.append(c) + + manager.stop_containers(to_stop) + manager.remove_containers(to_remove) + containers.notice_changed(to_remove) + +def started(manager, containers, count, name): + '''Ensure that exactly `count` matching containers exist and are running.''' + + containers.refresh() + delta = count - len(containers.running) + + if delta > 0: + if name and containers.deployed: + # A stopped container exists with the requested name. + # Clean it up before attempting to start a new one. + manager.remove_containers(containers.deployed) + + created = manager.create_containers(delta) + manager.start_containers(created) + containers.notice_changed(created) + + if delta < 0: + excess = containers.running[0:-delta] + manager.stop_containers(excess) + manager.remove_containers(excess) + containers.notice_changed(excess) + +def reloaded(manager, containers, count, name): + ''' + Ensure that exactly `count` matching containers exist and are + running. If any associated settings have been changed (volumes, + ports or so on), restart those containers. 
+ ''' + + containers.refresh() + + for container in manager.get_differing_containers(): + manager.stop_containers([container]) + manager.remove_containers([container]) + + started(manager, containers, count, name) + +def restarted(manager, containers, count, name): + ''' + Ensure that exactly `count` matching containers exist and are + running. Unconditionally restart any that were already running. + ''' + + containers.refresh() + + manager.restart_containers(containers.running) + started(manager, containers, count, name) + +def stopped(manager, containers, count, name): + '''Stop any matching containers that are running.''' + + containers.refresh() + + manager.stop_containers(containers.running) + containers.notice_changed(containers.running) + +def killed(manager, containers, count, name): + '''Kill any matching containers that are running.''' + + containers.refresh() + + manager.kill_containers(containers.running) + containers.notice_changed(containers.running) + +def absent(manager, containers, count, name): + '''Stop and remove any matching containers.''' + + containers.refresh() + + manager.stop_containers(containers.running) + manager.remove_containers(containers.deployed) + containers.notice_changed(containers.deployed) + def main(): module = AnsibleModule( argument_spec = dict( @@ -852,7 +1297,7 @@ def main(): env = dict(type='dict'), dns = dict(), detach = dict(default=True, type='bool'), - state = dict(default='running', choices=['absent', 'present', 'running', 'stopped', 'killed', 'restarted']), + state = dict(default='started', choices=['present', 'started', 'reloaded', 'restarted', 'stopped', 'killed', 'absent']), restart_policy = dict(default=None, choices=['always', 'on-failure', 'no']), restart_policy_retry = dict(default=0, type='int'), debug = dict(default=False, type='bool'), @@ -878,6 +1323,7 @@ def main(): if count < 0: module.fail_json(msg="Count must be greater than zero") + if count > 1 and name: module.fail_json(msg="Count and name must 
not be used together") @@ -887,101 +1333,39 @@ def main(): if pull == "always": manager.pull_image() - # Find the ID of the requested image and tag, if available. - image_id = None - inspected_image = manager.get_inspect_image() - if inspected_image: - image_id = inspected_image.get('Id') - - running_containers = manager.get_running_containers() - running_count = len(running_containers) - delta = count - running_count - deployed_containers = manager.get_deployed_containers() - facts = None + containers = ContainerSet(manager) failed = False - changed = False - - # start/stop containers - if state in [ "running", "present" ]: - - # make sure a container with `name` exists, if not create and start it - if name: - # first determine if a container with this name exists - existing_container = None - for deployed_container in deployed_containers: - if deployed_container.get('Name') == '/%s' % name: - existing_container = deployed_container - break - - # the named container is running, but with a - # different image or tag, so we stop it first - if existing_container and (image_id is None or existing_container.get('Image') != image_id): - manager.stop_containers([existing_container]) - manager.remove_containers([existing_container]) - running_containers = manager.get_running_containers() - deployed_containers = manager.get_deployed_containers() - existing_container = None - - # if the container isn't running (or if we stopped the - # old version above), create and (maybe) start it up now - if not existing_container: - containers = manager.create_containers(1) - if state == "present": # otherwise it get (re)started later anyways.. 
- manager.start_containers(containers) - running_containers = manager.get_running_containers() - deployed_containers = manager.get_deployed_containers() - - if state == "running": - # make sure a container with `name` is running - if name and "/" + name not in map(lambda x: x.get('Name'), running_containers): - manager.start_containers(deployed_containers) - - # start more containers if we don't have enough - elif delta > 0: - containers = manager.create_containers(delta) - manager.start_containers(containers) - - # stop containers if we have too many - elif delta < 0: - containers_to_stop = running_containers[0:abs(delta)] - containers = manager.stop_containers(containers_to_stop) - manager.remove_containers(containers_to_stop) - - facts = manager.get_running_containers() - else: - facts = manager.get_deployed_containers() - - # stop and remove containers - elif state == "absent": - facts = manager.stop_containers(deployed_containers) - manager.remove_containers(deployed_containers) - - # stop containers - elif state == "stopped": - facts = manager.stop_containers(running_containers) - # kill containers - elif state == "killed": - manager.kill_containers(running_containers) - - # restart containers - elif state == "restarted": - manager.restart_containers(running_containers) - facts = manager.get_inspect_containers(running_containers) - - msg = "%s container(s) running image %s with command %s" % \ - (manager.get_summary_counters_msg(), module.params.get('image'), module.params.get('command')) - changed = manager.has_changed() + if state == 'present': + present(manager, containers, count, name) + elif state == 'started': + started(manager, containers, count, name) + elif state == 'reloaded': + reloaded(manager, containers, count, name) + elif state == 'restarted': + restarted(manager, containers, count, name) + elif state == 'stopped': + stopped(manager, containers, count, name) + elif state == 'killed': + killed(manager, containers, count, name) + elif state == 
'absent': + absent(manager, containers, count, name) + else: + module.fail_json(msg='Unrecognized state %s. Must be one of: ' + 'present; started; reloaded; restarted; ' + 'stopped; killed; absent.' % state) - module.exit_json(failed=failed, changed=changed, msg=msg, ansible_facts=_ansible_facts(facts)) + module.exit_json(changed=manager.has_changed(), + msg=manager.get_summary_message(), + summary=manager.counters, + containers=containers.changed, + reload_reasons=manager.get_reload_reason_message()) - except DockerAPIError, e: - changed = manager.has_changed() - module.exit_json(failed=True, changed=changed, msg="Docker API error: " + e.explanation) + except DockerAPIError as e: + module.fail_json(changed=manager.has_changed(), msg="Docker API Error: %s" % e.explanation) - except RequestException, e: - changed = manager.has_changed() - module.exit_json(failed=True, changed=changed, msg=repr(e)) + except RequestException as e: + module.fail_json(changed=manager.has_changed(), msg=repr(e)) # import module snippets from ansible.module_utils.basic import * From d046425049c9b644458592011f3c22b338eea88a Mon Sep 17 00:00:00 2001 From: Ash Wilson Date: Wed, 18 Feb 2015 10:29:53 -0500 Subject: [PATCH 222/236] Connect to Docker with optional TLS. --- cloud/docker/docker.py | 49 ++++++++++++++++++++++++++++++++++++++---- 1 file changed, 45 insertions(+), 4 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 8efa9a448bb..09ca7f2b70a 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -507,12 +507,50 @@ class DockerManager(object): self.env = self.module.params.get('env', None) - # connect to docker server - docker_url = urlparse(module.params.get('docker_url')) + # Connect to the docker server using any configured host and TLS settings. 
+ + env_host = os.getenv('DOCKER_HOST') + env_cert_path = os.getenv('DOCKER_CERT_PATH') + + docker_url = module.params.get('docker_url') + if not docker_url: + if env_host: + docker_url = env_host + else: + docker_url = 'unix://var/run/docker.sock' + + docker_tls_cert = module.params.get('docker_tls_cert') + if not docker_tls_cert and env_cert_path: + docker_tls_cert = os.path.join(env_cert_path, 'cert.pem') + + docker_tls_key = module.params.get('docker_tls_key') + if not docker_tls_key and env_cert_path: + docker_tls_key = os.path.join(env_cert_path, 'key.pem') + + docker_tls_cacert = module.params.get('docker_tls_cacert') + if not docker_tls_cacert and env_cert_path: + docker_tls_cacert = os.path.join(env_cert_path, 'ca.pem') + docker_api_version = module.params.get('docker_api_version') if not docker_api_version: docker_api_version=docker.client.DEFAULT_DOCKER_API_VERSION - self.client = docker.Client(base_url=docker_url.geturl(), version=docker_api_version) + + tls_config = None + if docker_tls_cert or docker_tls_key or docker_tls_cacert: + # See https://github.com/docker/docker-py/blob/d39da11/docker/utils/utils.py#L279-L296 + docker_url = docker_url.replace('tcp://', 'https://') + verify = docker_tls_cacert is not None + + tls_config = docker.tls.TLSConfig( + client_cert=(docker_tls_cert, docker_tls_key), + ca_cert=docker_tls_cacert, + verify=verify, + assert_hostname=False + ) + + self.client = docker.Client(base_url=docker_url, + version=docker_api_version, + tls=tls_config) self.docker_py_versioninfo = get_docker_py_versioninfo() @@ -1286,7 +1324,10 @@ def main(): links = dict(default=None, type='list'), memory_limit = dict(default=0), memory_swap = dict(default=0), - docker_url = dict(default='unix://var/run/docker.sock'), + docker_url = dict(), + docker_tls_cert = dict(), + docker_tls_key = dict(), + docker_tls_cacert = dict(), docker_api_version = dict(), username = dict(default=None), password = dict(), From 79db47531650e7c87658a59a5d9e293ab39ec684 
Mon Sep 17 00:00:00 2001 From: Ash Wilson Date: Mon, 9 Mar 2015 10:05:56 -0400 Subject: [PATCH 223/236] Full image inspection and just repo tags Hat tip to @bobrik. --- cloud/docker/docker.py | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index f42cf882846..1bfbdf99581 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -725,16 +725,24 @@ class DockerManager(object): return False def get_inspect_image(self): + try: + return self.client.inspect_image(self.module.params.get('image')) + except DockerAPIError as e: + if e.response.status_code == 404: + return None + else: + raise e + + def get_image_repo_tags(self): image, tag = get_split_image_tag(self.module.params.get('image')) if tag is None: tag = 'latest' resource = '%s:%s' % (image, tag) - matching_image = None for image in self.client.images(name=image): if resource in image.get('RepoTags', []): - matching_image = image - return matching_image + return image['RepoTags'] + return None def get_inspect_containers(self, containers): inspect = [] @@ -1032,10 +1040,10 @@ class DockerManager(object): # that map to the same Docker image. 
inspected = self.get_inspect_image() if inspected: - images = inspected.get('RepoTags', []) + repo_tags = self.get_image_repo_tags() else: image, tag = get_split_image_tag(self.module.params.get('image')) - images = [':'.join([image, tag])] + repo_tags = [':'.join([image, tag])] for i in self.client.containers(all=True): running_image = i['Image'] @@ -1045,7 +1053,7 @@ class DockerManager(object): if name: matches = name in i.get('Names', []) else: - image_matches = running_image in images + image_matches = running_image in repo_tags # if a container has an entrypoint, `command` will actually equal # '{} {}'.format(entrypoint, command) From 2c58eb38458816ff36080676176b8ccb4f1c1365 Mon Sep 17 00:00:00 2001 From: Ash Wilson Date: Mon, 9 Mar 2015 10:20:04 -0400 Subject: [PATCH 224/236] Exposed ports, env vars, volumes from the image. This will account for settings that are provided by the hierarchy of Dockerfiles used to construct your image, rather than only accounting for settings provided to the module directly. --- cloud/docker/docker.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 1bfbdf99581..b1d799f9aaf 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -804,9 +804,7 @@ class DockerManager(object): continue # EXPOSED PORTS - # Note that ports that are bound at container run are also exposed - # implicitly. - expected_exposed_ports = set() + expected_exposed_ports = set((image['ContainerConfig']['ExposedPorts'] or {}).keys()) for p in (self.exposed_ports or []): expected_exposed_ports.add("/".join(p)) @@ -820,7 +818,7 @@ class DockerManager(object): # VOLUMES # not including bind modes. 
- expected_volume_keys = set() + expected_volume_keys = set((image['ContainerConfig']['Volumes'] or {}).keys()) if self.volumes: for key, config in self.volumes.iteritems(): if not config and key not in self.binds: @@ -846,13 +844,13 @@ class DockerManager(object): # actual_env is likely to include environment variables injected by # the Dockerfile. - expected_env = set() + expected_env = set(image['ContainerConfig']['Env'] or []) if self.env: for name, value in self.env.iteritems(): expected_env.add("{}={}".format(name, value)) actual_env = set(container['Config']['Env'] or []) - if not actual_env.issuperset(expected_env): + if actual_env != expected_env: # Don't include the environment difference in the output. self.reload_reasons.append('environment') differing.append(container) From 608ddbea29ed7b5ba85efdf465e844b2a32494c4 Mon Sep 17 00:00:00 2001 From: Ash Wilson Date: Mon, 9 Mar 2015 11:20:14 -0400 Subject: [PATCH 225/236] Include bind mounts in VOLUMES check. --- cloud/docker/docker.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index b1d799f9aaf..ff8391ae6e6 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -816,13 +816,11 @@ class DockerManager(object): continue # VOLUMES - # not including bind modes. expected_volume_keys = set((image['ContainerConfig']['Volumes'] or {}).keys()) if self.volumes: - for key, config in self.volumes.iteritems(): - if not config and key not in self.binds: - expected_volume_keys.add(key) + expected_volume_keys.update(self.volumes.keys()) + actual_volume_keys = set((container['Config']['Volumes'] or {}).keys()) if actual_volume_keys != expected_volume_keys: From 19664a96dbf08c29a75e1f9b2c87d66225f4a4f4 Mon Sep 17 00:00:00 2001 From: Ash Wilson Date: Mon, 9 Mar 2015 12:17:39 -0400 Subject: [PATCH 226/236] Expect module env vars to override Dockerfile ones. 
--- cloud/docker/docker.py | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index ff8391ae6e6..b382a388835 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -842,11 +842,20 @@ class DockerManager(object): # actual_env is likely to include environment variables injected by # the Dockerfile. - expected_env = set(image['ContainerConfig']['Env'] or []) + expected_env = {} + + for image_env in image['ContainerConfig']['Env'] or []: + name, value = image_env.split('=', 1) + expected_env[name] = value + if self.env: for name, value in self.env.iteritems(): - expected_env.add("{}={}".format(name, value)) - actual_env = set(container['Config']['Env'] or []) + expected_env[name] = value + + actual_env = {} + for container_env in container['Config']['Env'] or []: + name, value = container_env.split('=', 1) + actual_env[name] = value if actual_env != expected_env: # Don't include the environment difference in the output. From 0969fd75c867a2f653d8a201257593904052dfee Mon Sep 17 00:00:00 2001 From: Ash Wilson Date: Mon, 9 Mar 2015 12:54:22 -0400 Subject: [PATCH 227/236] Use {0}, {1} style format indices for 2.6 compat --- cloud/docker/docker.py | 40 ++++++++++++++++++++-------------------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index b382a388835..7a836940852 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -787,7 +787,7 @@ class DockerManager(object): # will be restarted when new versions of an existing image are # pulled. 
if container['Image'] != image['Id']: - self.reload_reasons.append('image ({} => {})'.format(container['Image'], image['Id'])) + self.reload_reasons.append('image ({0} => {1})'.format(container['Image'], image['Id'])) differing.append(container) continue @@ -799,7 +799,7 @@ class DockerManager(object): actual_command = container["Config"]["Cmd"] if actual_command != expected_command: - self.reload_reasons.append('command ({} => {})'.format(actual_command, expected_command)) + self.reload_reasons.append('command ({0} => {1})'.format(actual_command, expected_command)) differing.append(container) continue @@ -811,7 +811,7 @@ class DockerManager(object): actually_exposed_ports = set((container["Config"]["ExposedPorts"] or {}).keys()) if actually_exposed_ports != expected_exposed_ports: - self.reload_reasons.append('exposed_ports ({} => {})'.format(actually_exposed_ports, expected_exposed_ports)) + self.reload_reasons.append('exposed_ports ({0} => {1})'.format(actually_exposed_ports, expected_exposed_ports)) differing.append(container) continue @@ -824,7 +824,7 @@ class DockerManager(object): actual_volume_keys = set((container['Config']['Volumes'] or {}).keys()) if actual_volume_keys != expected_volume_keys: - self.reload_reasons.append('volumes ({} => {})'.format(actual_volume_keys, expected_volume_keys)) + self.reload_reasons.append('volumes ({0} => {1})'.format(actual_volume_keys, expected_volume_keys)) differing.append(container) continue @@ -834,7 +834,7 @@ class DockerManager(object): actual_mem = container['Config']['Memory'] if expected_mem and actual_mem != expected_mem: - self.reload_reasons.append('memory ({} => {})'.format(actual_mem, expected_mem)) + self.reload_reasons.append('memory ({0} => {1})'.format(actual_mem, expected_mem)) differing.append(container) continue @@ -868,7 +868,7 @@ class DockerManager(object): expected_hostname = self.module.params.get('hostname') actual_hostname = container['Config']['Hostname'] if expected_hostname and 
actual_hostname != expected_hostname: - self.reload_reasons.append('hostname ({} => {})'.format(actual_hostname, expected_hostname)) + self.reload_reasons.append('hostname ({0} => {1})'.format(actual_hostname, expected_hostname)) differing.append(container) continue @@ -877,7 +877,7 @@ class DockerManager(object): expected_domainname = self.module.params.get('domainname') actual_domainname = container['Config']['Domainname'] if expected_domainname and actual_domainname != expected_domainname: - self.reload_reasons.append('domainname ({} => {})'.format(actual_domainname, expected_domainname)) + self.reload_reasons.append('domainname ({0} => {1})'.format(actual_domainname, expected_domainname)) differing.append(container) continue @@ -897,7 +897,7 @@ class DockerManager(object): expected_stdin_open = self.module.params.get('stdin_open') actual_stdin_open = container['Config']['AttachStdin'] if actual_stdin_open != expected_stdin_open: - self.reload_reasons.append('stdin_open ({} => {})'.format(actual_stdin_open, expected_stdin_open)) + self.reload_reasons.append('stdin_open ({0} => {1})'.format(actual_stdin_open, expected_stdin_open)) differing.append(container) continue @@ -906,7 +906,7 @@ class DockerManager(object): expected_tty = self.module.params.get('tty') actual_tty = container['Config']['Tty'] if actual_tty != expected_tty: - self.reload_reasons.append('tty ({} => {})'.format(actual_tty, expected_tty)) + self.reload_reasons.append('tty ({0} => {1})'.format(actual_tty, expected_tty)) differing.append(container) continue @@ -918,7 +918,7 @@ class DockerManager(object): expected_lxc = set(self.lxc_conf) actual_lxc = set(container['HostConfig']['LxcConf'] or []) if actual_lxc != expected_lxc: - self.reload_reasons.append('lxc_conf ({} => {})'.format(actual_lxc, expected_lxc)) + self.reload_reasons.append('lxc_conf ({0} => {1})'.format(actual_lxc, expected_lxc)) differing.append(container) continue @@ -936,7 +936,7 @@ class DockerManager(object): else: 
container_path = config mode = 'rw' - expected_binds.add("{}:{}:{}".format(host_path, container_path, mode)) + expected_binds.add("{0}:{1}:{2}".format(host_path, container_path, mode)) actual_binds = set() for bind in (container['HostConfig']['Binds'] or []): @@ -946,7 +946,7 @@ class DockerManager(object): actual_binds.add(bind) if actual_binds != expected_binds: - self.reload_reasons.append('binds ({} => {})'.format(actual_binds, expected_binds)) + self.reload_reasons.append('binds ({0} => {1})'.format(actual_binds, expected_binds)) differing.append(container) continue @@ -956,7 +956,7 @@ class DockerManager(object): if self.port_bindings: for container_port, config in self.port_bindings.iteritems(): if isinstance(container_port, int): - container_port = "{}/tcp".format(container_port) + container_port = "{0}/tcp".format(container_port) bind = {} if len(config) == 1: bind['HostIp'] = "0.0.0.0" @@ -970,7 +970,7 @@ class DockerManager(object): actual_bound_ports = container['HostConfig']['PortBindings'] or {} if actual_bound_ports != expected_bound_ports: - self.reload_reasons.append('port bindings ({} => {})'.format(actual_bound_ports, expected_bound_ports)) + self.reload_reasons.append('port bindings ({0} => {1})'.format(actual_bound_ports, expected_bound_ports)) differing.append(container) continue @@ -984,7 +984,7 @@ class DockerManager(object): expected_privileged = self.module.params.get('privileged') actual_privileged = container['HostConfig']['Privileged'] if actual_privileged != expected_privileged: - self.reload_reasons.append('privileged ({} => {})'.format(actual_privileged, expected_privileged)) + self.reload_reasons.append('privileged ({0} => {1})'.format(actual_privileged, expected_privileged)) differing.append(container) continue @@ -992,11 +992,11 @@ class DockerManager(object): expected_links = set() for link, alias in (self.links or {}).iteritems(): - expected_links.add("/{}:/running/{}".format(link, alias)) + 
expected_links.add("/{0}:/running/{1}".format(link, alias)) actual_links = set(container['HostConfig']['Links'] or []) if actual_links != expected_links: - self.reload_reasons.append('links ({} => {})'.format(actual_links, expected_links)) + self.reload_reasons.append('links ({0} => {1})'.format(actual_links, expected_links)) differing.append(container) continue @@ -1005,7 +1005,7 @@ class DockerManager(object): expected_netmode = self.module.params.get('net') or '' actual_netmode = container['HostConfig']['NetworkMode'] if actual_netmode != expected_netmode: - self.reload_reasons.append('net ({} => {})'.format(actual_netmode, expected_netmode)) + self.reload_reasons.append('net ({0} => {1})'.format(actual_netmode, expected_netmode)) differing.append(container) continue @@ -1014,7 +1014,7 @@ class DockerManager(object): expected_dns = set(self.module.params.get('dns') or []) actual_dns = set(container['HostConfig']['Dns'] or []) if actual_dns != expected_dns: - self.reload_reasons.append('dns ({} => {})'.format(actual_dns, expected_dns)) + self.reload_reasons.append('dns ({0} => {1})'.format(actual_dns, expected_dns)) differing.append(container) continue @@ -1023,7 +1023,7 @@ class DockerManager(object): expected_volumes_from = set(self.module.params.get('volumes_from') or []) actual_volumes_from = set(container['HostConfig']['VolumesFrom'] or []) if actual_volumes_from != expected_volumes_from: - self.reload_reasons.append('volumes_from ({} => {})'.format(actual_volumes_from, expected_volumes_from)) + self.reload_reasons.append('volumes_from ({0} => {1})'.format(actual_volumes_from, expected_volumes_from)) differing.append(container) return differing From 9ecfde7551e9bb7653d250c162d7fdb32fa97a78 Mon Sep 17 00:00:00 2001 From: Ian Babrou Date: Mon, 9 Mar 2015 22:45:43 +0300 Subject: [PATCH 228/236] not triggering reload for links and not string env variables --- cloud/docker/docker.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git 
a/cloud/docker/docker.py b/cloud/docker/docker.py index 7a836940852..1863c3c54a8 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -850,7 +850,7 @@ class DockerManager(object): if self.env: for name, value in self.env.iteritems(): - expected_env[name] = value + expected_env[name] = str(value) actual_env = {} for container_env in container['Config']['Env'] or []: @@ -859,7 +859,7 @@ class DockerManager(object): if actual_env != expected_env: # Don't include the environment difference in the output. - self.reload_reasons.append('environment') + self.reload_reasons.append('environment {0} => {1}'.format(actual_env, expected_env)) differing.append(container) continue @@ -992,7 +992,7 @@ class DockerManager(object): expected_links = set() for link, alias in (self.links or {}).iteritems(): - expected_links.add("/{0}:/running/{1}".format(link, alias)) + expected_links.add("/{0}:{1}/{2}".format(link, container["Name"], alias)) actual_links = set(container['HostConfig']['Links'] or []) if actual_links != expected_links: From 0b0040a4ae2d1a32d9c8d5856ed9dbb50e69bc10 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 10 Mar 2015 10:34:59 -0400 Subject: [PATCH 229/236] updated to keep same info as in extras repo --- README.md | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 0535e4a302b..09a35854879 100644 --- a/README.md +++ b/README.md @@ -7,7 +7,22 @@ New module submissions for modules that do not yet exist should be submitted to Take care to submit tickets to the appropriate repo where modules are contained. The docs.ansible.com website indicates this at the bottom of each module documentation page. +Reporting bugs +============== + +Take care to submit tickets to the appropriate repo where modules are contained. The repo is mentioned at the bottom of module documentation page at [docs.ansible.com](http://docs.ansible.com/). 
+ +Testing modules +=============== + +Ansible [module development guide](http://docs.ansible.com/developing_modules.html#testing-modules) contains the latest info about that. + License ======= -As with Ansible, modules distributed with Ansible are GPLv3 licensed. User generated modules not part of this project can be of any license. +As with Ansible, modules distributed with Ansible are GPLv3 licensed. User generated modules not part of this project can be of any license. + +Installation +============ + +There should be no need to install this repo separately as it should be included in any Ansible install using the official documented methods. From a4e65e8e6ed277792d7866fc70a3687afc9315a9 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Tue, 10 Mar 2015 11:30:12 -0500 Subject: [PATCH 230/236] Prevent an empty error message --- cloud/rackspace/rax.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/cloud/rackspace/rax.py b/cloud/rackspace/rax.py index 288d7307b82..874274c22f3 100644 --- a/cloud/rackspace/rax.py +++ b/cloud/rackspace/rax.py @@ -314,7 +314,11 @@ def create(module, names=[], flavor=None, image=None, meta={}, key_name=None, block_device_mapping_v2=bdmv2, **extra_create_args)) except Exception, e: - module.fail_json(msg='%s' % e.message) + if e.message: + msg = str(e.message) + else: + msg = repr(e) + module.fail_json(msg=msg) else: changed = True From 1e36b6b7f6ea6840afaa3f4032ea721dd1e636be Mon Sep 17 00:00:00 2001 From: James Bowes Date: Tue, 10 Mar 2015 21:43:45 -0300 Subject: [PATCH 231/236] Fix C() formatting typo in file module --- files/file.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/file.py b/files/file.py index 4eb6cb0b64f..8da87b0707e 100644 --- a/files/file.py +++ b/files/file.py @@ -57,7 +57,7 @@ options: or M(template) module if you want that behavior. If C(link), the symbolic link will be created or changed. Use C(hard) for hardlinks. 
If C(absent), directories will be recursively deleted, and files or symlinks will be unlinked. - If C(touch) (new in 1.4), an empty file will be created if the c(path) does not + If C(touch) (new in 1.4), an empty file will be created if the C(path) does not exist, while an existing file or directory will receive updated file access and modification times (similar to the way `touch` works from the command line). required: false From ca32205c7b7652247b001307003b508c50f9aa98 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 6 Mar 2015 21:51:51 -0800 Subject: [PATCH 232/236] Documentation style changes --- cloud/docker/docker.py | 137 +++++++++++++++++++++-------------------- 1 file changed, 69 insertions(+), 68 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 1863c3c54a8..88161c0abd3 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -27,174 +27,175 @@ module: docker version_added: "1.4" short_description: manage docker containers description: -- Manage the life cycle of docker containers. + - Manage the life cycle of docker containers. options: count: description: - - Number of matching containers that should be in the desired state. + - Number of matching containers that should be in the desired state. default: 1 image: description: - - Container image used to match and launch containers. + - Container image used to match and launch containers. required: true pull: description: - - Control when container images are updated from the C(docker_url) registry. - - If "missing," images will be pulled only when missing from the host; if - - '"always," the registry will be checked for a newer version of the image' - - each time the task executes. + - Control when container images are updated from the C(docker_url) registry. + If "missing," images will be pulled only when missing from the host; + if '"always," the registry will be checked for a newer version of the + image' each time the task executes. 
default: missing choices: [ "missing", "always" ] version_added: "1.9" command: description: - - Command used to match and launch containers. + - Command used to match and launch containers. default: null name: description: - - Name used to match and uniquely name launched containers. Explicit names - - are used to uniquely identify a single container or to link among - - containers. Mutually exclusive with a "count" other than "1". + - Name used to match and uniquely name launched containers. Explicit names + are used to uniquely identify a single container or to link among + containers. Mutually exclusive with a "count" other than "1". default: null version_added: "1.5" ports: description: - - List containing private to public port mapping specification. Use docker - - 'CLI-style syntax: C(8000), C(9000:8000), or C(0.0.0.0:9000:8000) where' - - 8000 is a container port, 9000 is a host port, and 0.0.0.0 is a host - - interface. + - List containing private to public port mapping specification. Use docker + - 'CLI-style syntax: C(8000), C(9000:8000), or C(0.0.0.0:9000:8000)' + - where 8000 is a container port, 9000 is a host port, and 0.0.0.0 is + - a host interface. default: null version_added: "1.5" expose: description: - - List of additional container ports to expose for port mappings or links. - - If the port is already exposed using EXPOSE in a Dockerfile, you don't - - need to expose it again. + - List of additional container ports to expose for port mappings or links. + If the port is already exposed using EXPOSE in a Dockerfile, you don't + need to expose it again. default: null version_added: "1.5" publish_all_ports: description: - - Publish all exposed ports to the host interfaces. + - Publish all exposed ports to the host interfaces. default: false version_added: "1.5" volumes: description: - - List of volumes to mount within the container using docker CLI-style - - 'syntax: C(/host:/container[:mode]) where "mode" may be "rw" or "ro".' 
+ - List of volumes to mount within the container using docker CLI-style + - 'syntax: C(/host:/container[:mode]) where "mode" may be "rw" or "ro".' default: null volumes_from: description: - - List of names of containers to mount volumes from. + - List of names of containers to mount volumes from. default: null links: description: - - List of other containers to link within this container with an optional - - 'alias. Use docker CLI-style syntax: C(redis:myredis).' + - List of other containers to link within this container with an optional + - 'alias. Use docker CLI-style syntax: C(redis:myredis).' default: null version_added: "1.5" memory_limit: description: - - RAM allocated to the container as a number of bytes or as a human-readable - - string like "512MB". Leave as "0" to specify no limit. + - RAM allocated to the container as a number of bytes or as a human-readable + string like "512MB". Leave as "0" to specify no limit. default: 0 docker_url: description: - - URL of the host running the docker daemon. This will default to the env - - var DOCKER_HOST if unspecified. + - URL of the host running the docker daemon. This will default to the env + var DOCKER_HOST if unspecified. default: ${DOCKER_HOST} or unix://var/run/docker.sock docker_tls_cert: description: - - Path to a PEM-encoded client certificate to secure the Docker connection. + - Path to a PEM-encoded client certificate to secure the Docker connection. default: ${DOCKER_CERT_PATH}/cert.pem docker_tls_key: description: - - Path to a PEM-encoded client key to secure the Docker connection. + - Path to a PEM-encoded client key to secure the Docker connection. default: ${DOCKER_CERT_PATH}/key.pem docker_tls_cacert: description: - - Path to a PEM-encoded certificate authority to secure the Docker connection. + - Path to a PEM-encoded certificate authority to secure the Docker connection. default: ${DOCKER_CERT_PATH}/ca.pem docker_api_version: description: - - Remote API version to use. 
This defaults to the current default as - - specified by docker-py. + - Remote API version to use. This defaults to the current default as + specified by docker-py. default: docker-py default remote API version version_added: "1.8" username: description: - - Remote API username. + - Remote API username. default: null password: description: - - Remote API password. + - Remote API password. default: null email: description: - - Remote API email. + - Remote API email. default: null hostname: description: - - Container hostname. + - Container hostname. default: null domainname: description: - - Container domain name. + - Container domain name. default: null env: description: - - Pass a dict of environment variables to the container. + - Pass a dict of environment variables to the container. default: null dns: description: - - List of custom DNS servers for the container. + - List of custom DNS servers for the container. required: false default: null detach: description: - - Enable detached mode to leave the container running in background. + - Enable detached mode to leave the container running in background. default: true state: description: - - Assert the container's desired state. "present" only asserts that the - - matching containers exist. "started" asserts that the matching containers - - both exist and are running, but takes no action if any configuration has - - changed. "reloaded" asserts that all matching containers are running and - - restarts any that have any images or configuration out of date. "restarted" - - unconditionally restarts (or starts) the matching containers. "stopped" and - - '"killed" stop and kill all matching containers. "absent" stops and then' - - removes any matching containers. + - Assert the container's desired state. "present" only asserts that the + matching containers exist. "started" asserts that the matching + containers both exist and are running, but takes no action if any + configuration has changed. 
"reloaded" asserts that all matching + containers are running and restarts any that have any images or + configuration out of date. "restarted" unconditionally restarts (or + starts) the matching containers. "stopped" and '"killed" stop and kill + all matching containers. "absent" stops and then' removes any matching + containers. required: false default: started choices: - - present - - started - - reloaded - - restarted - - stopped - - killed - - absent + - present + - started + - reloaded + - restarted + - stopped + - killed + - absent privileged: description: - - Whether the container should run in privileged mode or not. + - Whether the container should run in privileged mode or not. default: false lxc_conf: description: - - LXC configuration parameters, such as C(lxc.aa_profile:unconfined). + - LXC configuration parameters, such as C(lxc.aa_profile:unconfined). default: null stdin_open: description: - - Keep stdin open after a container is launched. + - Keep stdin open after a container is launched. default: false version_added: "1.6" tty: description: - - Allocate a pseudo-tty within the container. + - Allocate a pseudo-tty within the container. default: false version_added: "1.6" net: description: - - 'Network mode for the launched container: bridge, none, container:' - - or host. Requires docker >= 0.11. + - 'Network mode for the launched container: bridge, none, container:' + - or host. Requires docker >= 0.11. default: false version_added: "1.8" pid: @@ -206,26 +207,26 @@ options: version_added: "1.9" registry: description: - - Remote registry URL to pull images from. + - Remote registry URL to pull images from. default: DockerHub aliases: [] version_added: "1.8" restart_policy: description: - - Container restart policy. + - Container restart policy. choices: ["no", "on-failure", "always"] default: null version_added: "1.9" restart_policy_retry: description: - - Maximum number of times to restart a container. Leave as "0" for unlimited - - retries. 
+ - Maximum number of times to restart a container. Leave as "0" for unlimited + retries. default: 0 version_added: "1.9" insecure_registry: description: - - Use insecure private registry by HTTP instead of HTTPS. Needed for - - docker-py >= 0.5.0. + - Use insecure private registry by HTTP instead of HTTPS. Needed for + docker-py >= 0.5.0. default: false version_added: "1.9" From d65f9aa3af80e22be0bee2ba8955cfee45c1f4ab Mon Sep 17 00:00:00 2001 From: Hagai Kariti Date: Wed, 11 Mar 2015 17:30:20 +0200 Subject: [PATCH 233/236] Fix KeyError in public zones in route53 --- cloud/amazon/route53.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/route53.py b/cloud/amazon/route53.py index c499cfa4fdc..50de5cc3b09 100644 --- a/cloud/amazon/route53.py +++ b/cloud/amazon/route53.py @@ -261,7 +261,7 @@ def main(): for r53zone in results['ListHostedZonesResponse']['HostedZones']: # only save this zone id if the private status of the zone matches # the private_zone_in boolean specified in the params - if module.boolean(r53zone['Config']['PrivateZone']) == private_zone_in: + if module.boolean(r53zone['Config'].get('PrivateZone', False)) == private_zone_in: zone_id = r53zone['Id'].replace('/hostedzone/', '') zones[r53zone['Name']] = zone_id From 974075277d565ef6f3837b300744a959fbb5f716 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 11 Mar 2015 08:59:38 -0700 Subject: [PATCH 234/236] Also don't fail if files are different and overwrite is false for uploads (this mimics copy and template). Related to #489 --- cloud/amazon/s3.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/cloud/amazon/s3.py b/cloud/amazon/s3.py index f7abff18dca..8fe908e1514 100644 --- a/cloud/amazon/s3.py +++ b/cloud/amazon/s3.py @@ -377,8 +377,8 @@ def main(): if overwrite is True: download_s3file(module, s3, bucket, obj, dest) else: - module.exit_json(msg="WARNING: Checksums do not match. 
Use overwrite parameter to force download.", failed=False) - + module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force download.") + # Firstly, if key_matches is TRUE and overwrite is not enabled, we EXIT with a helpful message. if sum_matches is True and overwrite is False: module.exit_json(msg="Local and remote object are identical, ignoring. Use overwrite parameter to force.", changed=False) @@ -388,8 +388,8 @@ def main(): download_s3file(module, s3, bucket, obj, dest) # If sum does not match but the destination exists, we - - # if our mode is a PUT operation (upload), go through the procedure as appropriate ... + + # if our mode is a PUT operation (upload), go through the procedure as appropriate ... if mode == 'put': # Use this snippet to debug through conditionals: @@ -400,7 +400,7 @@ def main(): pathrtn = path_check(src) if pathrtn is False: module.fail_json(msg="Local object for PUT does not exist", failed=True) - + # Lets check to see if bucket exists to get ground truth. bucketrtn = bucket_check(module, s3, bucket) if bucketrtn is True: @@ -421,7 +421,7 @@ def main(): if overwrite is True: upload_s3file(module, s3, bucket, obj, src, expiry, metadata) else: - module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force upload.", failed=True) + module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force upload.") # If neither exist (based on bucket existence), we can create both. 
if bucketrtn is False and pathrtn is True: From 8ed415589a1ba633e090f4827a7c5321bb561994 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 11 Mar 2015 18:39:24 -0700 Subject: [PATCH 235/236] Initialize the value of vpc_id variable --- cloud/amazon/ec2.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index b59d4bda669..5fb17255a0e 100755 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -783,6 +783,7 @@ def create_instances(module, ec2, vpc, override_count=None): module.fail_json(msg = str("Use only one type of parameter (group_name) or (group_id)")) sys.exit(1) + vpc_id = None if vpc_subnet_id: vpc_id = vpc.get_all_subnets(subnet_ids=[vpc_subnet_id])[0].vpc_id else: From 31cc5f543f4166eddb334340fd559765dc6c3940 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 11 Mar 2015 19:07:55 -0700 Subject: [PATCH 236/236] Fix for py2.6 (no dict comprehensions on py2.6) --- cloud/amazon/ec2.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index 5fb17255a0e..10791439556 100755 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -593,7 +593,8 @@ def get_instance_info(inst): 'state': inst.state, 'hypervisor': inst.hypervisor, 'tags': inst.tags, - 'groups': {group.id: group.name for group in inst.groups}} + 'groups': dict((group.id, group.name) for group in inst.groups), + } try: instance_info['virtualization_type'] = getattr(inst,'virtualization_type') except AttributeError: