From d7efb2635c003bdb6dcb8e1321f8c94434f720cf Mon Sep 17 00:00:00 2001 From: Jan Inowolski Date: Thu, 29 Jan 2015 12:38:58 +0100 Subject: [PATCH 01/48] update git remote url before ls-remote related to #8177 --- source_control/git.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/source_control/git.py b/source_control/git.py index 0cb87304a92..fb7af79f2c4 100644 --- a/source_control/git.py +++ b/source_control/git.py @@ -473,10 +473,20 @@ def get_head_branch(git_path, module, dest, remote, bare=False): f.close() return branch -def fetch(git_path, module, repo, dest, version, remote, bare, refspec): +def set_remote_url(git_path, module, repo, dest, remote): ''' updates repo from remote sources ''' commands = [("set a new url %s for %s" % (repo, remote), [git_path, 'remote', 'set-url', remote, repo])] + for (label,command) in commands: + (rc,out,err) = module.run_command(command, cwd=dest) + if rc != 0: + module.fail_json(msg="Failed to %s: %s %s" % (label, out, err)) + +def fetch(git_path, module, repo, dest, version, remote, bare, refspec): + ''' updates repo from remote sources ''' + set_remote_url(git_path, module, repo, dest, remote) + commands = [] + fetch_str = 'download remote objects and refs' if bare: @@ -709,6 +719,7 @@ def main(): if not module.check_mode: reset(git_path, module, dest) # exit if already at desired sha version + set_remote_url(git_path, module, repo, dest, remote) remote_head = get_remote_head(git_path, module, dest, version, remote, bare) if before == remote_head: if local_mods: From a24ffc105636ebec2db80b05696dafeb9cc2979f Mon Sep 17 00:00:00 2001 From: Nikolay Ivanko Date: Mon, 13 Jul 2015 14:31:39 +0300 Subject: [PATCH 02/48] add virtual floppy to VMware guest --- cloud/vmware/vsphere_guest.py | 62 +++++++++++++++++++++++++++++++++++ 1 file changed, 62 insertions(+) diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index f0239544cec..732f1d13108 100644 --- 
a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -165,6 +165,9 @@ EXAMPLES = ''' vm_cdrom: type: "iso" iso_path: "DatastoreName/cd-image.iso" + vm_floppy: + type: "image" + image_path: "DatastoreName/floppy-image.flp" esxi: datacenter: MyDatacenter hostname: esx001.mydomain.local @@ -357,6 +360,44 @@ def add_cdrom(module, s, config_target, config, devices, default_devs, type="cli devices.append(cd_spec) +def add_floppy(module, s, config_target, config, devices, default_devs, type="image", vm_floppy_image_path=None): + # Add a floppy + # Make sure the datastore exists. + if vm_floppy_image_path: + image_location = vm_floppy_image_path.split('/', 1) + datastore, ds = find_datastore( + module, s, image_location[0], config_target) + image_path = image_location[1] + + floppy_spec = config.new_deviceChange() + floppy_spec.set_element_operation('add') + floppy_ctrl = VI.ns0.VirtualFloppy_Def("floppy_ctrl").pyclass() + + if type == "image": + image = VI.ns0.VirtualFloppyImageBackingInfo_Def("image").pyclass() + ds_ref = image.new_datastore(ds) + ds_ref.set_attribute_type(ds.get_attribute_type()) + image.set_element_datastore(ds_ref) + image.set_element_fileName("%s %s" % (datastore, image_path)) + floppy_ctrl.set_element_backing(image) + floppy_ctrl.set_element_key(3) + floppy_spec.set_element_device(floppy_ctrl) + elif type == "client": + client = VI.ns0.VirtualFloppyRemoteDeviceBackingInfo_Def( + "client").pyclass() + client.set_element_deviceName("/dev/fd0") + floppy_ctrl.set_element_backing(client) + floppy_ctrl.set_element_key(3) + floppy_spec.set_element_device(floppy_ctrl) + else: + s.disconnect() + module.fail_json( + msg="Error adding floppy of type %s to vm spec. 
" + " floppy type can either be image or client" % (type)) + + devices.append(floppy_spec) + + def add_nic(module, s, nfmor, config, devices, nic_type="vmxnet3", network_name="VM Network", network_type="standard"): # add a NIC # Different network card types are: "VirtualE1000", @@ -922,6 +963,27 @@ def create_vm(vsphere_client, module, esxi, resource_pool, cluster_name, guest, # Add a CD-ROM device to the VM. add_cdrom(module, vsphere_client, config_target, config, devices, default_devs, cdrom_type, cdrom_iso_path) + if 'vm_floppy' in vm_hardware: + floppy_image_path = None + floppy_type = None + try: + floppy_type = vm_hardware['vm_floppy']['type'] + except KeyError: + vsphere_client.disconnect() + module.fail_json( + msg="Error on %s definition. floppy type needs to be" + " specified." % vm_hardware['vm_floppy']) + if floppy_type == 'image': + try: + floppy_image_path = vm_hardware['vm_floppy']['image_path'] + except KeyError: + vsphere_client.disconnect() + module.fail_json( + msg="Error on %s definition. floppy image_path needs" + " to be specified." % vm_hardware['vm_floppy']) + # Add a floppy to the VM. 
+ add_floppy(module, vsphere_client, config_target, config, devices, + default_devs, floppy_type, floppy_image_path) if vm_nic: for nic in sorted(vm_nic.iterkeys()): try: From 8e7d9be02bc1f4f12dc538684e3353a8f8883b97 Mon Sep 17 00:00:00 2001 From: Andrew Briening Date: Thu, 25 Jun 2015 16:52:23 -0400 Subject: [PATCH 03/48] Adds basic authentication & skip certificate validation to win_get_url module --- windows/win_get_url.ps1 | 17 +++++++++++++++++ windows/win_get_url.py | 17 ++++++++++++++++- 2 files changed, 33 insertions(+), 1 deletion(-) diff --git a/windows/win_get_url.ps1 b/windows/win_get_url.ps1 index 46979c129f2..525854eae87 100644 --- a/windows/win_get_url.ps1 +++ b/windows/win_get_url.ps1 @@ -40,11 +40,23 @@ Else { Fail-Json $result "missing required argument: dest" } +$skip_certificate_validation = Get-Attr $params "skip_certificate_validation" $false | ConvertTo-Bool +$username = Get-Attr $params "username" +$password = Get-Attr $params "password" + +if($skip_certificate_validation){ + [System.Net.ServicePointManager]::ServerCertificateValidationCallback = {$true} +} + $force = Get-Attr -obj $params -name "force" "yes" | ConvertTo-Bool If ($force -or -not (Test-Path $dest)) { $client = New-Object System.Net.WebClient + if($username -and $password){ + $client.Credentials = New-Object System.Net.NetworkCredential($username, $password) + } + Try { $client.DownloadFile($url, $dest) $result.changed = $true @@ -56,6 +68,11 @@ If ($force -or -not (Test-Path $dest)) { Else { Try { $webRequest = [System.Net.HttpWebRequest]::Create($url) + + if($username -and $password){ + $webRequest.Credentials = New-Object System.Net.NetworkCredential($username, $password) + } + $webRequest.IfModifiedSince = ([System.IO.FileInfo]$dest).LastWriteTime $webRequest.Method = "GET" [System.Net.HttpWebResponse]$webResponse = $webRequest.GetResponse() diff --git a/windows/win_get_url.py b/windows/win_get_url.py index a34f23890b5..5c3e994d418 100644 --- a/windows/win_get_url.py +++ 
b/windows/win_get_url.py @@ -28,6 +28,7 @@ version_added: "1.7" short_description: Fetches a file from a given URL description: - Fetches a file from a URL and saves to locally +author: "Paul Durivage (@angstwad)" options: url: description: @@ -49,7 +50,21 @@ options: required: false choices: [ "yes", "no" ] default: yes -author: "Paul Durivage (@angstwad)" + username: + description: + - Basic authentication username + required: false + default: null + password: + description: + - Basic authentication password + required: false + default: null + skip_certificate_validation: + description: + - Skip SSL certificate validation if true + required: false + default: false ''' EXAMPLES = ''' From 625fb1e182db778b6e67b0dc1f46001c1b23a565 Mon Sep 17 00:00:00 2001 From: Andrew Briening Date: Thu, 16 Jul 2015 15:01:09 -0400 Subject: [PATCH 04/48] Show the exception messages --- windows/win_get_url.ps1 | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/windows/win_get_url.ps1 b/windows/win_get_url.ps1 index 525854eae87..18977bff1ef 100644 --- a/windows/win_get_url.ps1 +++ b/windows/win_get_url.ps1 @@ -62,7 +62,7 @@ If ($force -or -not (Test-Path $dest)) { $result.changed = $true } Catch { - Fail-Json $result "Error downloading $url to $dest" + Fail-Json $result "Error downloading $url to $dest $($_.Exception.Message)" } } Else { @@ -85,11 +85,11 @@ Else { } Catch [System.Net.WebException] { If ($_.Exception.Response.StatusCode -ne [System.Net.HttpStatusCode]::NotModified) { - Fail-Json $result "Error downloading $url to $dest" + Fail-Json $result "Error downloading $url to $dest $($_.Exception.Message)" } } Catch { - Fail-Json $result "Error downloading $url to $dest" + Fail-Json $result "Error downloading $url to $dest $($_.Exception.Message)" } } From e5e0a70fc1e7a4b3f1a89be376bdd1178ff3c988 Mon Sep 17 00:00:00 2001 From: queridiculo Date: Wed, 1 Jul 2015 17:14:55 -0400 Subject: [PATCH 05/48] yum: improved check_mode handling and package update flow. 
--- packaging/os/yum.py | 165 ++++++++++++++++++++++++++++++-------------- 1 file changed, 113 insertions(+), 52 deletions(-) diff --git a/packaging/os/yum.py b/packaging/os/yum.py index 14339b4c18b..cf321b31d13 100644 --- a/packaging/os/yum.py +++ b/packaging/os/yum.py @@ -212,7 +212,7 @@ def is_installed(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=None, di for rid in en_repos: my.repos.enableRepo(rid) - e,m,u = my.rpmdb.matchPackageNames([pkgspec]) + e, m, u = my.rpmdb.matchPackageNames([pkgspec]) pkgs = e + m if not pkgs: pkgs.extend(my.returnInstalledPackagesByDep(pkgspec)) @@ -224,16 +224,16 @@ def is_installed(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=None, di else: cmd = repoq + ["--disablerepo=*", "--pkgnarrow=installed", "--qf", qf, pkgspec] - rc,out,err = module.run_command(cmd) + rc, out, err = module.run_command(cmd) if not is_pkg: cmd = repoq + ["--disablerepo=*", "--pkgnarrow=installed", "--qf", qf, "--whatprovides", pkgspec] - rc2,out2,err2 = module.run_command(cmd) + rc2, out2, err2 = module.run_command(cmd) else: - rc2,out2,err2 = (0, '', '') + rc2, out2, err2 = (0, '', '') if rc == 0 and rc2 == 0: out += out2 - return [ p for p in out.split('\n') if p.strip() ] + return [p for p in out.split('\n') if p.strip()] else: module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err + err2)) @@ -541,7 +541,7 @@ def install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): module.fail_json(msg="Failure downloading %s, %s" % (spec, e)) #groups :( - elif spec.startswith('@'): + elif spec.startswith('@'): # complete wild ass guess b/c it's a group pkg = spec @@ -608,7 +608,11 @@ def install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): shutil.rmtree(tempdir) except Exception, e: module.fail_json(msg="Failure deleting temp directory %s, %s" % (tempdir, e)) - module.exit_json(changed=True) + + for p in pkgs: + # take note of which packages are getting installed + res['results'].append('%s will 
be installed' % p) + module.exit_json(changed=True, results=res['results']) changed = True @@ -676,7 +680,10 @@ def remove(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): cmd = yum_basecmd + ["remove"] + pkgs if module.check_mode: - module.exit_json(changed=True) + # take note of which packages are getting removed + for p in pkgs: + res['results'].append('%s will be removed' % p) + module.exit_json(changed=True, results=res['results']) rc, out, err = module.run_command(cmd) @@ -711,47 +718,69 @@ def latest(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): res['msg'] = '' res['changed'] = False res['rc'] = 0 - - for spec in items: - - pkg = None - basecmd = 'update' - cmd = '' - # groups, again - if spec.startswith('@'): - pkg = spec - - elif spec == '*': #update all - # use check-update to see if there is any need - rc,out,err = module.run_command(yum_basecmd + ['check-update']) - if rc == 100: - cmd = yum_basecmd + [basecmd] - else: - res['results'].append('All packages up to date') + pkgs = {} + pkgs['update'] = [] + pkgs['install'] = [] + updates = {} + update_all = False + cmd = None + + # determine if we're doing an update all + if '*' in items: + update_all = True + + # run check-update to see if we have packages pending + rc, out, err = module.run_command(yum_basecmd + ['check-update']) + if rc == 0 and update_all: + res['results'].append('Nothing to do here, all packages are up to date') + return res + elif rc == 100: + available_updates = out.split('\n') + # build update dictionary + for line in available_updates: + line = line.split() + # ignore irrelevant lines + # FIXME... revisit for something less kludgy + if '*' in line or len(line) != 3 or '.' 
not in line[0]: continue - - # dep/pkgname - find it - else: - if is_installed(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos): - basecmd = 'update' else: - basecmd = 'install' + pkg, version, repo = line + name, dist = pkg.split('.') + updates.update({name: {'version': version, 'dist': dist, 'repo': repo}}) + elif rc == 1: + res['msg'] = err + res['rc'] = rc + module.fail_json(**res) + if update_all: + cmd = yum_basecmd + ['update'] + else: + for spec in items: + # some guess work involved with groups. update @ will install the group if missing + if spec.startswith('@'): + pkgs['update'].append(spec) + continue + # dep/pkgname - find it + else: + if is_installed(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos): + pkgs['update'].append(spec) + else: + pkgs['install'].append(spec) pkglist = what_provides(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos) + # FIXME..? may not be desirable to throw an exception here if a single package is missing if not pkglist: res['msg'] += "No Package matching '%s' found available, installed or updated" % spec module.fail_json(**res) - + nothing_to_do = True for this in pkglist: - if basecmd == 'install' and is_available(module, repoq, this, conf_file, en_repos=en_repos, dis_repos=dis_repos): + if spec in pkgs['install'] and is_available(module, repoq, this, conf_file, en_repos=en_repos, dis_repos=dis_repos): nothing_to_do = False break - - if basecmd == 'update' and is_update(module, repoq, this, conf_file, en_repos=en_repos, dis_repos=dis_repos): - nothing_to_do = False - break - + + if spec in pkgs['update'] and spec in updates.keys(): + nothing_to_do = False + if nothing_to_do: res['results'].append("All packages providing %s are up to date" % spec) continue @@ -763,26 +792,59 @@ def latest(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): res['msg'] += "The following packages have pending transactions: %s" % ", ".join(conflicts) 
module.fail_json(**res) - pkg = spec - if not cmd: - cmd = yum_basecmd + [basecmd, pkg] + # list of package updates + if update_all: + will_update = updates.keys() + else: + will_update = [u for u in pkgs['update'] if u in updates.keys() or u.startswith('@')] - if module.check_mode: - return module.exit_json(changed=True) + # check_mode output + if module.check_mode: + for w in will_update: + if w.startswith('@'): + msg = '%s will be updated' % w + else: + msg = '%s will be updated with %s-%s.%s from %s' % (w, w, updates[w]['version'], updates[w]['dist'], updates[w]['repo']) + res['results'].append(msg) - rc, out, err = module.run_command(cmd) + for p in pkgs['install']: + res['results'].append('%s will be installed' % p) - res['rc'] += rc - res['results'].append(out) - res['msg'] += err + if len(will_update) > 0 or len(pkgs['install']) > 0: + res['changed'] = True - # FIXME if it is - update it and check to see if it applied - # check to see if there is no longer an update available for the pkgspec + return res - if rc: - res['failed'] = True + # run commands + if cmd: # update all + rc, out, err = module.run_command(cmd) + res['changed'] = True + else: + if len(pkgs['install']) > 0: # install missing + cmd = yum_basecmd + ['install'] + pkgs['install'] + rc, out, err = module.run_command(cmd) + res['changed'] = True else: + rc, out, err = [0, '', ''] + + if len(will_update) > 0: # update present + cmd = yum_basecmd + ['update'] + pkgs['update'] + rc2, out2, err2 = module.run_command(cmd) res['changed'] = True + else: + rc2, out2, err2 = [0, '', ''] + + if not update_all: + rc += rc2 + out += out2 + err += err2 + + res['rc'] += rc + res['msg'] += err + res['results'].append(out) + + if rc: + res['failed'] = True return res @@ -927,4 +989,3 @@ from ansible.module_utils.basic import * from ansible.module_utils.urls import * if __name__ == '__main__': main() - From 520a125693cc51e174b16517510c5bb4faa7b51c Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Sat, 25 
Apr 2015 00:12:25 -0400 Subject: [PATCH 06/48] bugfixes for redhat_subscription - correctly return pool ids for newer versions of subscription-manager - allow for managing subscriptions after initial registration. --- packaging/os/redhat_subscription.py | 77 +++++++++++++++++++++++------ 1 file changed, 62 insertions(+), 15 deletions(-) diff --git a/packaging/os/redhat_subscription.py b/packaging/os/redhat_subscription.py index 1cfd8fc25a6..0c4647ccf2a 100644 --- a/packaging/os/redhat_subscription.py +++ b/packaging/os/redhat_subscription.py @@ -180,7 +180,7 @@ class Rhsm(RegistrationBase): for k,v in kwargs.items(): if re.search(r'^(system|rhsm)_', k): args.append('--%s=%s' % (k.replace('_','.'), v)) - + self.module.run_command(args, check_rc=True) @property @@ -226,14 +226,26 @@ class Rhsm(RegistrationBase): rc, stderr, stdout = self.module.run_command(args, check_rc=True) - def unsubscribe(self): + def unsubscribe(self, serials=None): ''' - Unsubscribe a system from all subscribed channels + Unsubscribe a system from subscribed channels + Args: + serials(list or None): list of serials to unsubscribe. If + serials is none or an empty list, then + all subscribed channels will be removed. 
Raises: * Exception - if error occurs while running command ''' - args = ['subscription-manager', 'unsubscribe', '--all'] - rc, stderr, stdout = self.module.run_command(args, check_rc=True) + items = [] + if serials is not None and serials: + items = ["--serial=%s" % s for s in serials] + if serials is None: + items = ["--all"] + + if items: + args = ['subscription-manager', 'unsubscribe'] + items + rc, stderr, stdout = self.module.run_command(args, check_rc=True) + return serials def unregister(self): ''' @@ -255,8 +267,27 @@ class Rhsm(RegistrationBase): # Available pools ready for subscription available_pools = RhsmPools(self.module) + subscribed_pool_ids = [] for pool in available_pools.filter(regexp): pool.subscribe() + subscribed_pool_ids.append(pool.get_pool_id()) + return subscribed_pool_ids + + def update_subscriptions(self, regexp): + changed=False + consumed_pools = RhsmPools(self.module, consumed=True) + pool_ids_to_keep = [p.get_pool_id() for p in consumed_pools.filter(regexp)] + + serials_to_remove=[p.Serial for p in consumed_pools if p.get_pool_id() not in pool_ids_to_keep] + serials = self.unsubscribe(serials=serials_to_remove) + + subscribed_pool_ids = self.subscribe(regexp) + + if subscribed_pool_ids or serials: + changed=True + return {'changed': changed, 'subscribed_pool_ids': subscribed_pool_ids, + 'unsubscribed_serials': serials} + class RhsmPool(object): @@ -272,8 +303,11 @@ class RhsmPool(object): def __str__(self): return str(self.__getattribute__('_name')) + def get_pool_id(self): + return getattr(self, 'PoolId', getattr(self, 'PoolID')) + def subscribe(self): - args = "subscription-manager subscribe --pool %s" % self.PoolId + args = "subscription-manager subscribe --pool %s" % self.get_pool_id() rc, stdout, stderr = self.module.run_command(args, check_rc=True) if rc == 0: return True @@ -285,18 +319,22 @@ class RhsmPools(object): """ This class is used for manipulating pools subscriptions with RHSM """ - def __init__(self, module): + def 
__init__(self, module, consumed=False): self.module = module - self.products = self._load_product_list() + self.products = self._load_product_list(consumed) def __iter__(self): return self.products.__iter__() - def _load_product_list(self): + def _load_product_list(self, consumed=False): """ - Loads list of all available pools for system in data structure + Loads list of all available or consumed pools for system in data structure + + Args: + consumed(bool): if True list consumed pools, else list available pools (default False) """ - args = "subscription-manager list --available" + args = "subscription-manager list" + args += " --consumed" if consumed else " --available" rc, stdout, stderr = self.module.run_command(args, check_rc=True) products = [] @@ -375,18 +413,27 @@ def main(): # Register system if rhn.is_registered: - module.exit_json(changed=False, msg="System already registered.") + if pool != '^$': + try: + result = rhn.update_subscriptions(pool) + except Exception, e: + module.fail_json(msg="Failed to update subscriptions for '%s': %s" % (server_hostname, e)) + else: + module.exit_json(**result) + else: + module.exit_json(changed=False, msg="System already registered.") else: try: rhn.enable() rhn.configure(**module.params) rhn.register(username, password, autosubscribe, activationkey, org_id) - rhn.subscribe(pool) + subscribed_pool_ids = rhn.subscribe(pool) except Exception, e: module.fail_json(msg="Failed to register with '%s': %s" % (server_hostname, e)) else: - module.exit_json(changed=True, msg="System successfully registered to '%s'." % server_hostname) - + module.exit_json(changed=True, + msg="System successfully registered to '%s'." 
% server_hostname, + subscribed_pool_ids=subscribed_pool_ids) # Ensure system is *not* registered if state == 'absent': if not rhn.is_registered: From 08021026348cdab4c4651154755567f546beecc3 Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Tue, 28 Jul 2015 06:23:20 -0400 Subject: [PATCH 07/48] Add example for changing consumed subscriptions --- packaging/os/redhat_subscription.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/packaging/os/redhat_subscription.py b/packaging/os/redhat_subscription.py index 0c4647ccf2a..233d1a04e2b 100644 --- a/packaging/os/redhat_subscription.py +++ b/packaging/os/redhat_subscription.py @@ -76,6 +76,12 @@ EXAMPLES = ''' - redhat_subscription: state=present activationkey=1-222333444 pool='^(Red Hat Enterprise Server|Red Hat Virtualization)$' + +# Update the consumed subscriptions from the previous example (remove the Red +# Hat Virtualization subscription) +- redhat_subscription: state=present + activationkey=1-222333444 + pool='^Red Hat Enterprise Server$' ''' import os From c458b5e96cbee9359ead3540365120f2215e0517 Mon Sep 17 00:00:00 2001 From: Jason DeTiberus Date: Tue, 28 Jul 2015 06:30:37 -0400 Subject: [PATCH 08/48] python 2.4 syntax fix --- packaging/os/redhat_subscription.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/packaging/os/redhat_subscription.py b/packaging/os/redhat_subscription.py index 233d1a04e2b..8e1482a8c4f 100644 --- a/packaging/os/redhat_subscription.py +++ b/packaging/os/redhat_subscription.py @@ -340,7 +340,10 @@ class RhsmPools(object): consumed(bool): if True list consumed pools, else list available pools (default False) """ args = "subscription-manager list" - args += " --consumed" if consumed else " --available" + if consumed: + args += " --consumed" + else: + args += " --available" rc, stdout, stderr = self.module.run_command(args, check_rc=True) products = [] From 8ad072c96fc8125d5142293c4f2eb451edb23b24 Mon Sep 17 00:00:00 2001 From: Shobhit Srivastava Date: Tue, 
4 Aug 2015 12:52:56 +0530 Subject: [PATCH 09/48] checking remote_group_id while comparing os_security_group_rule --- cloud/openstack/os_security_group_rule.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cloud/openstack/os_security_group_rule.py b/cloud/openstack/os_security_group_rule.py index 91059aca015..b2324b097ce 100644 --- a/cloud/openstack/os_security_group_rule.py +++ b/cloud/openstack/os_security_group_rule.py @@ -213,12 +213,14 @@ def _find_matching_rule(module, secgroup): remote_ip_prefix = module.params['remote_ip_prefix'] ethertype = module.params['ethertype'] direction = module.params['direction'] + remote_group_id = module.params['remote_group'] for rule in secgroup['security_group_rules']: if (protocol == rule['protocol'] and remote_ip_prefix == rule['remote_ip_prefix'] and ethertype == rule['ethertype'] and direction == rule['direction'] + and remote_group_id == rule['remote_group_id'] and _ports_match(protocol, module.params['port_range_min'], module.params['port_range_max'], From 06e722900e9c487da22074b10c2b1387993a9826 Mon Sep 17 00:00:00 2001 From: Evan Carter Date: Wed, 5 Aug 2015 14:22:09 -0400 Subject: [PATCH 10/48] Adding the ability to associate eips with network interfaces --- cloud/amazon/ec2_eip.py | 151 +++++++++++++++++++++++++++------------- 1 file changed, 103 insertions(+), 48 deletions(-) diff --git a/cloud/amazon/ec2_eip.py b/cloud/amazon/ec2_eip.py index a564612e220..6fc1360d3fe 100644 --- a/cloud/amazon/ec2_eip.py +++ b/cloud/amazon/ec2_eip.py @@ -26,6 +26,11 @@ options: description: - The EC2 instance id required: false + network_interface_id: + description: + - The Elastic Network Interface (ENI) id + required: false + version_added: "2.0" public_ip: description: - The elastic IP address to associate with the instance. 
@@ -57,7 +62,6 @@ options: required: false default: false version_added: "1.6" - extends_documentation_fragment: aws author: "Lorin Hochstein (@lorin) " notes: @@ -72,22 +76,21 @@ notes: EXAMPLES = ''' - name: associate an elastic IP with an instance ec2_eip: instance_id=i-1212f003 ip=93.184.216.119 - +- name: associate an elastic IP with a device + ec2_eip: network_interface_id=eni-c8ad70f3 ip=93.184.216.119 - name: disassociate an elastic IP from an instance ec2_eip: instance_id=i-1212f003 ip=93.184.216.119 state=absent - +- name: disassociate an elastic IP with a device + ec2_eip: network_interface_id=eni-c8ad70f3 ip=93.184.216.119 state=absent - name: allocate a new elastic IP and associate it with an instance ec2_eip: instance_id=i-1212f003 - - name: allocate a new elastic IP without associating it to anything action: ec2_eip register: eip - name: output the IP debug: msg="Allocated IP is {{ eip.public_ip }}" - - name: another way of allocating an elastic IP without associating it to anything ec2_eip: state='present' - - name: provision new instances with ec2 ec2: keypair=mykey instance_type=c1.medium image=emi-40603AD1 wait=yes''' ''' group=webserver count=3 @@ -95,7 +98,6 @@ EXAMPLES = ''' - name: associate new elastic IPs with each of the instances ec2_eip: "instance_id={{ item }}" with_items: ec2.instance_ids - - name: allocate a new elastic IP inside a VPC in us-west-2 ec2_eip: region=us-west-2 in_vpc=yes register: eip @@ -113,27 +115,27 @@ except ImportError: class EIPException(Exception): pass - -def associate_ip_and_instance(ec2, address, instance_id, check_mode): - if address_is_associated_with_instance(ec2, address, instance_id): +def associate_ip_and_device(ec2, address, device_id, check_mode, isinstance=True): + if address_is_associated_with_device(ec2, address, device_id, isinstance): return {'changed': False} # If we're in check mode, nothing else to do if not check_mode: - if address.domain == 'vpc': - res = ec2.associate_address(instance_id, - 
allocation_id=address.allocation_id) + if isinstance: + if address.domain == "vpc": + res = ec2.associate_address(device_id, allocation_id=address.allocation_id) + else: + res = ec2.associate_address(device_id, public_ip=address.public_ip) else: - res = ec2.associate_address(instance_id, - public_ip=address.public_ip) + res = ec2.associate_address(network_interface_id=device_id, allocation_id=address.allocation_id) if not res: raise EIPException('association failed') return {'changed': True} -def disassociate_ip_and_instance(ec2, address, instance_id, check_mode): - if not address_is_associated_with_instance(ec2, address, instance_id): +def disassociate_ip_and_device(ec2, address, device_id, check_mode, isinstance=True): + if not address_is_associated_with_device(ec2, address, device_id, isinstance): return {'changed': False} # If we're in check mode, nothing else to do @@ -158,24 +160,33 @@ def _find_address_by_ip(ec2, public_ip): raise -def _find_address_by_instance_id(ec2, instance_id): - addresses = ec2.get_all_addresses(None, {'instance-id': instance_id}) +def _find_address_by_device_id(ec2, device_id, isinstance=True): + if isinstance: + addresses = ec2.get_all_addresses(None, {'instance-id': device_id}) + else: + addresses = ec2.get_all_addresses(None, {'network-interface-id': device_id}) if addresses: return addresses[0] -def find_address(ec2, public_ip, instance_id): +def find_address(ec2, public_ip, device_id, isinstance=True): """ Find an existing Elastic IP address """ if public_ip: return _find_address_by_ip(ec2, public_ip) - elif instance_id: - return _find_address_by_instance_id(ec2, instance_id) + elif device_id and isinstance: + return _find_address_by_device_id(ec2, device_id) + elif device_id: + return _find_address_by_device_id(ec2, device_id, isinstance=False) -def address_is_associated_with_instance(ec2, address, instance_id): - """ Check if the elastic IP is currently associated with the instance """ +def 
address_is_associated_with_device(ec2, address, device_id, isinstance=True): + """ Check if the elastic IP is currently associated with the device """ + address = ec2.get_all_addresses(address.public_ip) if address: - return address and address.instance_id == instance_id + if isinstance: + return address and address[0].instance_id == device_id + else: + return address and address[0].network_interface_id == device_id return False @@ -186,7 +197,7 @@ def allocate_address(ec2, domain, reuse_existing_ip_allowed): all_addresses = ec2.get_all_addresses(filters=domain_filter) unassociated_addresses = [a for a in all_addresses - if not a.instance_id] + if not a.device_id] if unassociated_addresses: return unassociated_addresses[0] @@ -204,21 +215,33 @@ def release_address(ec2, address, check_mode): return {'changed': True} -def find_instance(ec2, instance_id): +def find_device(ec2, device_id, isinstance=True): """ Attempt to find the EC2 instance and return it """ - reservations = ec2.get_all_reservations(instance_ids=[instance_id]) + if isinstance: + try: + reservations = ec2.get_all_reservations(instance_ids=[device_id]) + except boto.exception.EC2ResponseError, e: + module.fail_json(msg=str(e)) + + if len(reservations) == 1: + instances = reservations[0].instances + if len(instances) == 1: + return instances[0] + else: + try: + interfaces = ec2.get_all_network_interfaces(network_interface_ids=[device_id]) + except boto.exception.EC2ResponseError, e: + module.fail_json(msg=str(e)) - if len(reservations) == 1: - instances = reservations[0].instances - if len(instances) == 1: - return instances[0] + if len(interfaces) == 1: + return interfaces[0] - raise EIPException("could not find instance" + instance_id) + raise EIPException("could not find instance" + device_id) -def ensure_present(ec2, domain, address, instance_id, - reuse_existing_ip_allowed, check_mode): +def ensure_present(ec2, domain, address, device_id, + reuse_existing_ip_allowed, check_mode, isinstance=True): 
changed = False # Return the EIP object since we've been given a public IP @@ -229,28 +252,39 @@ def ensure_present(ec2, domain, address, instance_id, address = allocate_address(ec2, domain, reuse_existing_ip_allowed) changed = True - if instance_id: + if device_id: # Allocate an IP for instance since no public_ip was provided - instance = find_instance(ec2, instance_id) + if isinstance: + instance = find_device(ec2, device_id) + # Associate address object (provided or allocated) with instance + assoc_result = associate_ip_and_device(ec2, address, device_id, + check_mode) + else: + instance = find_device(ec2, device_id, isinstance=False) + # Associate address object (provided or allocated) with instance + assoc_result = associate_ip_and_device(ec2, address, device_id, + check_mode, isinstance=False) + if instance.vpc_id: domain = 'vpc' - # Associate address object (provided or allocated) with instance - assoc_result = associate_ip_and_instance(ec2, address, instance_id, - check_mode) changed = changed or assoc_result['changed'] return {'changed': changed, 'public_ip': address.public_ip} -def ensure_absent(ec2, domain, address, instance_id, check_mode): +def ensure_absent(ec2, domain, address, device_id, check_mode, isinstance=True): if not address: return {'changed': False} # disassociating address from instance - if instance_id: - return disassociate_ip_and_instance(ec2, address, instance_id, - check_mode) + if device_id: + if isinstance: + return disassociate_ip_and_device(ec2, address, device_id, + check_mode) + else: + return disassociate_ip_and_device(ec2, address, device_id, + check_mode, isinstance=False) # releasing address else: return release_address(ec2, address, check_mode) @@ -260,6 +294,7 @@ def main(): argument_spec = ec2_argument_spec() argument_spec.update(dict( instance_id=dict(required=False), + network_interface_id=dict(required=False), public_ip=dict(required=False, aliases=['ip']), state=dict(required=False, default='present', 
choices=['present', 'absent']), @@ -280,6 +315,7 @@ def main(): ec2 = ec2_connect(module) instance_id = module.params.get('instance_id') + network_interface_id = module.params.get('network_interface_id') public_ip = module.params.get('public_ip') state = module.params.get('state') in_vpc = module.params.get('in_vpc') @@ -287,20 +323,39 @@ def main(): reuse_existing_ip_allowed = module.params.get('reuse_existing_ip_allowed') try: - address = find_address(ec2, public_ip, instance_id) + if network_interface_id: + address = find_address(ec2, public_ip, network_interface_id, isinstance=False) + elif instance_id: + address = find_address(ec2, public_ip, instance_id) + else: + address = False if state == 'present': - result = ensure_present(ec2, domain, address, instance_id, + if instance_id: + result = ensure_present(ec2, domain, address, instance_id, reuse_existing_ip_allowed, module.check_mode) + elif network_interface_id: + result = ensure_present(ec2, domain, address, network_interface_id, + reuse_existing_ip_allowed, + module.check_mode, isinstance=False) + else: + address = allocate_address(ec2, domain, reuse_existing_ip_allowed) + result = {'changed': True, 'public_ip': address.public_ip} else: - result = ensure_absent(ec2, domain, address, instance_id, module.check_mode) + if network_interface_id: + result = ensure_absent(ec2, domain, address, network_interface_id, module.check_mode, isinstance=False) + elif instance_id: + result = ensure_absent(ec2, domain, address, instance_id, module.check_mode) + else: + address = find_address(ec2, public_ip, None) + result = release_address(ec2, address, module.check_mode) + except (boto.exception.EC2ResponseError, EIPException) as e: module.fail_json(msg=str(e)) module.exit_json(**result) - # import module snippets from ansible.module_utils.basic import * # noqa from ansible.module_utils.ec2 import * # noqa From 6a8108133ddcb7ac0450c0f0894948eca30b363e Mon Sep 17 00:00:00 2001 From: "David M. 
Lee" Date: Fri, 14 Aug 2015 13:01:44 -0500 Subject: [PATCH 11/48] ec2_vol: Added missing "needs 2.0" doc The ability to find-or-create a volume was added in 2.0. Added note to the example. --- cloud/amazon/ec2_vol.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cloud/amazon/ec2_vol.py b/cloud/amazon/ec2_vol.py index 4b829f7c26e..228bb12cfbc 100644 --- a/cloud/amazon/ec2_vol.py +++ b/cloud/amazon/ec2_vol.py @@ -146,6 +146,7 @@ EXAMPLES = ''' # Example: Launch an instance and then add a volume if not already attached # * Volume will be created with the given name if not already created. # * Nothing will happen if the volume is already attached. +# * Requires Ansible 2.0 - ec2: keypair: "{{ keypair }}" From d9511729208cdef60630a13d30bdf67008b40522 Mon Sep 17 00:00:00 2001 From: Ilya Epifanov Date: Sun, 16 Aug 2015 18:34:56 +0300 Subject: [PATCH 12/48] fixed memory_limit for docker api version >= 1.19 --- cloud/docker/docker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 1cf85843e0e..e045e2ce1fc 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -1428,7 +1428,7 @@ class DockerManager(object): if api_version < 1.19: params['mem_limit'] = mem_limit else: - params['host_config']['mem_limit'] = mem_limit + params['host_config']['Memory'] = mem_limit def do_create(count, params): From f459b3773cba0579d4a2ab5440e366c1cdd5b76a Mon Sep 17 00:00:00 2001 From: Mahesh Sawaiker Date: Mon, 17 Aug 2015 16:28:18 +0000 Subject: [PATCH 13/48] support creating role only --- cloud/openstack/keystone_user.py | 33 +++++++++++++++++++++++--------- 1 file changed, 24 insertions(+), 9 deletions(-) diff --git a/cloud/openstack/keystone_user.py b/cloud/openstack/keystone_user.py index a3529c290b3..2596eab980c 100644 --- a/cloud/openstack/keystone_user.py +++ b/cloud/openstack/keystone_user.py @@ -252,8 +252,17 @@ def ensure_user_exists(keystone, user_name, password, email, tenant_name, email=email, 
tenant_id=tenant.id) return (True, user.id) +def ensure_role_exists(keystone, role_name): + # Get the role if it exists + try: + role = get_role(keystone, role_name) + except KeyError: + # Role doesn't exist yet + role = keystone.roles.create(role_name) + return (True, role.id) + -def ensure_role_exists(keystone, user_name, tenant_name, role_name, +def ensure_user_role_exists(keystone, user_name, tenant_name, role_name, check_mode): """ Check if role exists @@ -297,9 +306,11 @@ def ensure_user_absent(keystone, user, check_mode): raise NotImplementedError("Not yet implemented") -def ensure_role_absent(keystone, uesr, tenant, role, check_mode): +def ensure_user_role_absent(keystone, uesr, tenant, role, check_mode): raise NotImplementedError("Not yet implemented") +def ensure_role_absent(keystone, role_name): + raise NotImplementedError("Not yet implemented") def main(): @@ -378,14 +389,18 @@ def dispatch(keystone, user=None, password=None, tenant=None, X absent ensure_tenant_absent X X present ensure_user_exists X X absent ensure_user_absent - X X X present ensure_role_exists - X X X absent ensure_role_absent - - + X X X present ensure_user_role_exists + X X X absent ensure_user_role_absent + X present ensure_role_exists + X absent ensure_role_absent """ changed = False id = None - if tenant and not user and not role and state == "present": + if not tenant and not user and role and state == "present": + ensure_role_exists(keystone, role) + elif not tenant and not user and role and state == "absent": + ensure_role_absent(keystone, role) + elif tenant and not user and not role and state == "present": changed, id = ensure_tenant_exists(keystone, tenant, tenant_description, check_mode) elif tenant and not user and not role and state == "absent": @@ -396,10 +411,10 @@ def dispatch(keystone, user=None, password=None, tenant=None, elif tenant and user and not role and state == "absent": changed = ensure_user_absent(keystone, user, check_mode) elif tenant and user and role 
and state == "present": - changed, id = ensure_role_exists(keystone, user, tenant, role, + changed, id = ensure_user_role_exists(keystone, user, tenant, role, check_mode) elif tenant and user and role and state == "absent": - changed = ensure_role_absent(keystone, user, tenant, role, check_mode) + changed = ensure_user_role_absent(keystone, user, tenant, role, check_mode) else: # Should never reach here raise ValueError("Code should never reach here") From dfac073343b5cf293de0c96fa9777acfefe3af55 Mon Sep 17 00:00:00 2001 From: HAMSIK Adam Date: Thu, 9 Jul 2015 23:55:56 +0200 Subject: [PATCH 14/48] Rebase start/stop instance pull code --- cloud/amazon/ec2.py | 98 ++++++++++++++++++++++++++++----------------- 1 file changed, 62 insertions(+), 36 deletions(-) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index 55c45a647f4..e8aaccaa10f 100644 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -144,7 +144,7 @@ options: instance_tags: version_added: "1.0" description: - - a hash/dictionary of tags to add to the new instance; '{"key":"value"}' and '{"key":"value","key":"value"}' + - a hash/dictionary of tags to add to the new instance or for starting/stopping instance by tag; '{"key":"value"}' and '{"key":"value","key":"value"}' required: false default: null aliases: [] @@ -229,19 +229,19 @@ options: exact_count: version_added: "1.5" description: - - An integer value which indicates how many instances that match the 'count_tag' parameter should be running. Instances are either created or terminated based on this value. + - An integer value which indicates how many instances that match the 'count_tag' parameter should be running. Instances are either created or terminated based on this value. required: false default: null aliases: [] count_tag: version_added: "1.5" description: - - Used with 'exact_count' to determine how many nodes based on a specific tag criteria should be running.
This can be expressed in multiple ways and is shown in the EXAMPLES section. For instance, one can request 25 servers that are tagged with "class=webserver". + - Used with 'exact_count' to determine how many nodes based on a specific tag criteria should be running. This can be expressed in multiple ways and is shown in the EXAMPLES section. For instance, one can request 25 servers that are tagged with "class=webserver". required: false default: null aliases: [] -author: +author: - "Tim Gerla (@tgerla)" - "Lester Wade (@lwade)" - "Seth Vidal" @@ -271,7 +271,7 @@ EXAMPLES = ''' wait: yes wait_timeout: 500 count: 5 - instance_tags: + instance_tags: db: postgres monitoring: yes vpc_subnet_id: subnet-29e63245 @@ -305,7 +305,7 @@ EXAMPLES = ''' wait: yes wait_timeout: 500 count: 5 - instance_tags: + instance_tags: db: postgres monitoring: yes vpc_subnet_id: subnet-29e63245 @@ -366,7 +366,7 @@ EXAMPLES = ''' region: us-east-1 tasks: - name: Launch instance - ec2: + ec2: key_name: "{{ keypair }}" group: "{{ security_group }}" instance_type: "{{ instance_type }}" @@ -446,6 +446,15 @@ EXAMPLES = ''' vpc_subnet_id: subnet-29e63245 assign_public_ip: yes +# +# Start stopped instances specified by tag +# +- local_action: + module: ec2 + instance_tags: + Name: ExtraPower + state: running + # # Enforce that 5 instances with a tag "foo" are running # (Highly recommended!) 
@@ -474,11 +483,11 @@ EXAMPLES = ''' image: ami-40603AD1 wait: yes group: webserver - instance_tags: + instance_tags: Name: database dbtype: postgres exact_count: 5 - count_tag: + count_tag: Name: database dbtype: postgres vpc_subnet_id: subnet-29e63245 @@ -531,8 +540,8 @@ def find_running_instances_by_count_tag(module, ec2, count_tag, zone=None): for res in reservations: if hasattr(res, 'instances'): for inst in res.instances: - instances.append(inst) - + instances.append(inst) + return reservations, instances @@ -543,7 +552,7 @@ def _set_none_to_blank(dictionary): result[k] = _set_none_to_blank(result[k]) elif not result[k]: result[k] = "" - return result + return result def get_reservations(module, ec2, tags=None, state=None, zone=None): @@ -682,7 +691,7 @@ def create_block_device(module, ec2, volume): # http://aws.amazon.com/about-aws/whats-new/2013/10/09/ebs-provisioned-iops-maximum-iops-gb-ratio-increased-to-30-1/ MAX_IOPS_TO_SIZE_RATIO = 30 if 'snapshot' not in volume and 'ephemeral' not in volume: - if 'volume_size' not in volume: + if 'volume_size' not in volume: module.fail_json(msg = 'Size must be specified when creating a new volume or modifying the root volume') if 'snapshot' in volume: if 'device_type' in volume and volume.get('device_type') == 'io1' and 'iops' not in volume: @@ -695,7 +704,7 @@ def create_block_device(module, ec2, volume): if 'encrypted' in volume: module.fail_json(msg = 'You can not set encyrption when creating a volume from a snapshot') if 'ephemeral' in volume: - if 'snapshot' in volume: + if 'snapshot' in volume: module.fail_json(msg = 'Cannot set both ephemeral and snapshot') return BlockDeviceType(snapshot_id=volume.get('snapshot'), ephemeral_name=volume.get('ephemeral'), @@ -760,18 +769,18 @@ def enforce_count(module, ec2, vpc): for inst in instance_dict_array: inst['state'] = "terminated" terminated_list.append(inst) - instance_dict_array = terminated_list - - # ensure all instances are dictionaries + instance_dict_array = 
terminated_list + + # ensure all instances are dictionaries all_instances = [] for inst in instances: if type(inst) is not dict: inst = get_instance_info(inst) - all_instances.append(inst) + all_instances.append(inst) return (all_instances, instance_dict_array, changed_instance_ids, changed) - - + + def create_instances(module, ec2, vpc, override_count=None): """ Creates new instances @@ -879,7 +888,7 @@ def create_instances(module, ec2, vpc, override_count=None): if ebs_optimized: params['ebs_optimized'] = ebs_optimized - + # 'tenancy' always has a default value, but it is not a valid parameter for spot instance resquest if not spot_price: params['tenancy'] = tenancy @@ -912,7 +921,7 @@ def create_instances(module, ec2, vpc, override_count=None): groups=group_id, associate_public_ip_address=assign_public_ip) interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection(interface) - params['network_interfaces'] = interfaces + params['network_interfaces'] = interfaces else: params['subnet_id'] = vpc_subnet_id if vpc_subnet_id: @@ -922,11 +931,11 @@ def create_instances(module, ec2, vpc, override_count=None): if volumes: bdm = BlockDeviceMapping() - for volume in volumes: + for volume in volumes: if 'device_name' not in volume: module.fail_json(msg = 'Device name must be set for volume') # Minimum volume size is 1GB. 
We'll use volume size explicitly set to 0 - # to be a signal not to create this volume + # to be a signal not to create this volume if 'volume_size' not in volume or int(volume['volume_size']) > 0: bdm[volume['device_name']] = create_block_device(module, ec2, volume) @@ -1016,7 +1025,7 @@ def create_instances(module, ec2, vpc, override_count=None): num_running = 0 wait_timeout = time.time() + wait_timeout while wait_timeout > time.time() and num_running < len(instids): - try: + try: res_list = ec2.get_all_instances(instids) except boto.exception.BotoServerError, e: if e.error_code == 'InvalidInstanceID.NotFound': @@ -1029,7 +1038,7 @@ def create_instances(module, ec2, vpc, override_count=None): for res in res_list: num_running += len([ i for i in res.instances if i.state=='running' ]) if len(res_list) <= 0: - # got a bad response of some sort, possibly due to + # got a bad response of some sort, possibly due to # stale/cached data. Wait a second and then try again time.sleep(1) continue @@ -1141,12 +1150,12 @@ def terminate_instances(module, ec2, instance_ids): filters={'instance-state-name':'terminated'}): for inst in res.instances: instance_dict_array.append(get_instance_info(inst)) - + return (changed, instance_dict_array, terminated_instance_ids) -def startstop_instances(module, ec2, instance_ids, state): +def startstop_instances(module, ec2, instance_ids, state, instance_tags): """ Starts or stops a list of existing instances @@ -1154,6 +1163,8 @@ def startstop_instances(module, ec2, instance_ids, state): ec2: authenticated ec2 connection object instance_ids: The list of instances to start in the form of [ {id: }, ..] + instance_tags: A dict of tag keys and values in the form of + {key: value, ... } state: Intended state ("running" or "stopped") Returns a dictionary of instance information @@ -1162,19 +1173,33 @@ def startstop_instances(module, ec2, instance_ids, state): If the instance was not able to change state, "changed" will be set to False. 
+ Note that if instance_ids and instance_tags are both non-empty, + this method will process the intersection of the two """ - + wait = module.params.get('wait') wait_timeout = int(module.params.get('wait_timeout')) changed = False instance_dict_array = [] - + if not isinstance(instance_ids, list) or len(instance_ids) < 1: - module.fail_json(msg='instance_ids should be a list of instances, aborting') + # Fail unless the user defined instance tags + if not instance_tags: + module.fail_json(msg='instance_ids should be a list of instances, aborting') + + # To make an EC2 tag filter, we need to prepend 'tag:' to each key. + # An empty filter does no filtering, so it's safe to pass it to the + # get_all_instances method even if the user did not specify instance_tags + filters = {} + if instance_tags: + for key, value in instance_tags.items(): + filters["tag:" + key] = value + + # Check that our instances are not in the state we want to take # Check (and eventually change) instances attributes and instances state running_instances_array = [] - for res in ec2.get_all_instances(instance_ids): + for res in ec2.get_all_instances(instance_ids, filters=filters): for inst in res.instances: # Check "source_dest_check" attribute @@ -1295,11 +1320,12 @@ def main(): (changed, instance_dict_array, new_instance_ids) = terminate_instances(module, ec2, instance_ids) elif state in ('running', 'stopped'): - instance_ids = module.params['instance_ids'] - if not instance_ids: - module.fail_json(msg='instance_ids list is requried for %s state' % state) + instance_ids = module.params.get('instance_ids') + instance_tags = module.params.get('instance_tags') + if not (isinstance(instance_ids, list) or isinstance(instance_tags, dict)): + module.fail_json(msg='running list needs to be a list of instances or set of tags to run: %s' % instance_ids) - (changed, instance_dict_array, new_instance_ids) = startstop_instances(module, ec2, instance_ids, state) + (changed, instance_dict_array, 
new_instance_ids) = startstop_instances(module, ec2, instance_ids, state, instance_tags) elif state == 'present': # Changed is always set to true when provisioning new instances From 8215aad3b3345be74d1a4f9dc90bc8019d48fb81 Mon Sep 17 00:00:00 2001 From: Jason Cowley Date: Wed, 19 Aug 2015 11:30:21 -0700 Subject: [PATCH 15/48] Add support for S3 canned permissions. resolves #1939 --- cloud/amazon/s3.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/cloud/amazon/s3.py b/cloud/amazon/s3.py index 5c97031c09c..811978a0f0e 100644 --- a/cloud/amazon/s3.py +++ b/cloud/amazon/s3.py @@ -93,6 +93,12 @@ options: - Keyname of the object inside the bucket. Can be used to create "virtual directories", see examples. required: false default: null + permission: + description: + - This option lets the user set the canned permissions on the object/bucket that are created. The permissions that can be set are 'private', 'public-read', 'public-read-write', 'authenticated-read'. 
+ required: false + default: private + version_added: "2.0" prefix: description: - Limits the response to keys that begin with the specified prefix for list mode @@ -167,7 +173,7 @@ EXAMPLES = ''' - s3: bucket=mybucket mode=list prefix=/my/desired/ marker=/my/desired/0023.txt max_keys=472 # Create an empty bucket -- s3: bucket=mybucket mode=create +- s3: bucket=mybucket mode=create permission=public-read # Create a bucket with key as directory, in the EU region - s3: bucket=mybucket object=/my/directory/path mode=create region=eu-west-1 @@ -236,6 +242,7 @@ def create_bucket(module, s3, bucket, location=None): location = Location.DEFAULT try: bucket = s3.create_bucket(bucket, location=location) + bucket.set_acl(module.params.get('permission')) except s3.provider.storage_response_error, e: module.fail_json(msg= str(e)) if bucket: @@ -306,6 +313,7 @@ def upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, heade key.set_metadata(meta_key, metadata[meta_key]) key.set_contents_from_filename(src, encrypt_key=encrypt, headers=headers) + key.set_acl(module.params.get('permission')) url = key.generate_url(expiry) module.exit_json(msg="PUT operation complete", url=url, changed=True) except s3.provider.storage_copy_error, e: @@ -378,6 +386,7 @@ def main(): metadata = dict(type='dict'), mode = dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list'], required=True), object = dict(), + permission = dict(choices=['private', 'public-read', 'public-read-write', 'authenticated-read'], default='private'), version = dict(default=None), overwrite = dict(aliases=['force'], default='always'), prefix = dict(default=None), From 4171d167f481928b6d14c66749fe9be5a0595543 Mon Sep 17 00:00:00 2001 From: Nithy Renganathan Date: Thu, 20 Aug 2015 14:24:05 +0000 Subject: [PATCH 16/48] Handle the changed value --- cloud/openstack/keystone_user.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git 
a/cloud/openstack/keystone_user.py b/cloud/openstack/keystone_user.py index 2596eab980c..babcc3cc569 100644 --- a/cloud/openstack/keystone_user.py +++ b/cloud/openstack/keystone_user.py @@ -256,11 +256,14 @@ def ensure_role_exists(keystone, role_name): # Get the role if it exists try: role = get_role(keystone, role_name) + # Role does exist, we're done + return (False, role.id) except KeyError: # Role doesn't exist yet - role = keystone.roles.create(role_name) - return (True, role.id) + pass + role = keystone.roles.create(role_name) + return (True, role.id) def ensure_user_role_exists(keystone, user_name, tenant_name, role_name, check_mode): @@ -397,9 +400,9 @@ def dispatch(keystone, user=None, password=None, tenant=None, changed = False id = None if not tenant and not user and role and state == "present": - ensure_role_exists(keystone, role) + changed, id = ensure_role_exists(keystone, role) elif not tenant and not user and role and state == "absent": - ensure_role_absent(keystone, role) + changed = ensure_role_absent(keystone, role) elif tenant and not user and not role and state == "present": changed, id = ensure_tenant_exists(keystone, tenant, tenant_description, check_mode) From e4a5f3b4099765a103ab949741312e520e8d35d6 Mon Sep 17 00:00:00 2001 From: Brian Richards Date: Thu, 20 Aug 2015 11:07:58 -0500 Subject: [PATCH 17/48] Preventing servers that are deleted and left in the deleted state from being included in the server list --- cloud/rackspace/rax_facts.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cloud/rackspace/rax_facts.py b/cloud/rackspace/rax_facts.py index c30df5b9462..481732c0af7 100644 --- a/cloud/rackspace/rax_facts.py +++ b/cloud/rackspace/rax_facts.py @@ -97,7 +97,9 @@ def rax_facts(module, address, name, server_id): servers.append(cs.servers.get(server_id)) except Exception, e: pass - + + servers[:] = [server for server in servers if server.status != "DELETED"] + if len(servers) > 1: module.fail_json(msg='Multiple 
servers found matching provided ' 'search parameters') From 9d4694122deeb3a5f5f193dcf32851f36decd73c Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 20 Aug 2015 13:02:29 -0700 Subject: [PATCH 18/48] Return change results in a dictionary listing the package names. Fix a parsing problem when package names contain a dot. --- packaging/os/yum.py | 20 +++++++------------- 1 file changed, 7 insertions(+), 13 deletions(-) diff --git a/packaging/os/yum.py b/packaging/os/yum.py index cf321b31d13..5778bc38c3c 100644 --- a/packaging/os/yum.py +++ b/packaging/os/yum.py @@ -609,10 +609,7 @@ def install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): except Exception, e: module.fail_json(msg="Failure deleting temp directory %s, %s" % (tempdir, e)) - for p in pkgs: - # take note of which packages are getting installed - res['results'].append('%s will be installed' % p) - module.exit_json(changed=True, results=res['results']) + module.exit_json(changed=True, results=res['results'], changes=dict(installed=pkgs)) changed = True @@ -680,10 +677,7 @@ def remove(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): cmd = yum_basecmd + ["remove"] + pkgs if module.check_mode: - # take note of which packages are getting removed - for p in pkgs: - res['results'].append('%s will be removed' % p) - module.exit_json(changed=True, results=res['results']) + module.exit_json(changed=True, results=res['results'], changes=dict(removed=pkgs)) rc, out, err = module.run_command(cmd) @@ -745,7 +739,7 @@ def latest(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): continue else: pkg, version, repo = line - name, dist = pkg.split('.') + name, dist = pkg.rsplit('.', 1) updates.update({name: {'version': version, 'dist': dist, 'repo': repo}}) elif rc == 1: res['msg'] = err @@ -800,15 +794,15 @@ def latest(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): # check_mode output if module.check_mode: + to_update = [] for w in 
will_update: if w.startswith('@'): + to_update.append((w, None)) msg = '%s will be updated' % w else: - msg = '%s will be updated with %s-%s.%s from %s' % (w, w, updates[w]['version'], updates[w]['dist'], updates[w]['repo']) - res['results'].append(msg) + to_update.append((w, '%s.%s from %s' % (updates[w]['version'], updates[w]['dist'], updates[w]['repo']))) - for p in pkgs['install']: - res['results'].append('%s will be installed' % p) + res['changes'] = dict(installed=pkgs['install'], updated=to_update) if len(will_update) > 0 or len(pkgs['install']) > 0: res['changed'] = True From 4721d6d8b5e251054b01ddaf8bb852e8204c2c9f Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 20 Aug 2015 15:30:32 -0700 Subject: [PATCH 19/48] Fix for the case where plugins aren't loaded on old RHEL systems --- packaging/os/yum.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/packaging/os/yum.py b/packaging/os/yum.py index 423f59981ae..c66e73ad98b 100644 --- a/packaging/os/yum.py +++ b/packaging/os/yum.py @@ -972,10 +972,15 @@ def main(): # loaded and plugins are discovered my.conf repoquery = None - if 'rhnplugin' in my.plugins._plugins: - repoquerybin = ensure_yum_utils(module) - if repoquerybin: - repoquery = [repoquerybin, '--show-duplicates', '--plugins', '--quiet'] + try: + yum_plugins = my.plugins._plugins + except AttributeError: + pass + else: + if 'rhnplugin' in yum_plugins: + repoquerybin = ensure_yum_utils(module) + if repoquerybin: + repoquery = [repoquerybin, '--show-duplicates', '--plugins', '--quiet'] pkg = [ p.strip() for p in params['name']] exclude = params['exclude'] From 428550e179f7a57202b452ba3e530b3c791f695e Mon Sep 17 00:00:00 2001 From: Simon Li Date: Fri, 21 Aug 2015 17:55:28 +0100 Subject: [PATCH 20/48] Don't fail in check_mode if user exists PR #1651 fixed issue #1515 but the requirement for path to be defined is unecessarily strict. If the user has previously been created a path isn't necessary. 
--- system/authorized_key.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/system/authorized_key.py b/system/authorized_key.py index f9f773d8d90..376cf4c61dc 100644 --- a/system/authorized_key.py +++ b/system/authorized_key.py @@ -169,16 +169,15 @@ def keyfile(module, user, write=False, path=None, manage_dir=True): :return: full path string to authorized_keys for user """ - if module.check_mode: - if path is None: - module.fail_json(msg="You must provide full path to key file in check mode") - else: - keysfile = path - return keysfile + if module.check_mode and path is not None: + keysfile = path + return keysfile try: user_entry = pwd.getpwnam(user) except KeyError, e: + if module.check_mode and path is None: + module.fail_json(msg="Either user must exist or you must provide full path to key file in check mode") module.fail_json(msg="Failed to lookup user %s: %s" % (user, str(e))) if path is None: homedir = user_entry.pw_dir From f7f621839ad2063c707506f7eaf5663f113664a7 Mon Sep 17 00:00:00 2001 From: Bruno Galindro da Costa Date: Fri, 21 Aug 2015 14:13:09 -0300 Subject: [PATCH 21/48] Added termination_policies option --- cloud/amazon/ec2_asg.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/cloud/amazon/ec2_asg.py b/cloud/amazon/ec2_asg.py index efcd66606b8..e67d2a07d39 100644 --- a/cloud/amazon/ec2_asg.py +++ b/cloud/amazon/ec2_asg.py @@ -126,6 +126,13 @@ options: version_added: "1.9" default: yes required: False + termination_policies: + description: + - An ordered list of criteria used for selecting instances to be removed from the Auto Scaling group when reducing capacity. + required: false + default: Default. E.g.: When used to create a new autoscaling group, the “Default” value is used. 
When used to change an existent autoscaling group, the current termination policies are maintained + choices: ['OldestInstance', 'NewestInstance', 'OldestLaunchConfiguration', 'ClosestToNextInstanceHour', 'Default'] + version_added: "2.0" extends_documentation_fragment: aws """ @@ -421,7 +428,8 @@ def create_autoscaling_group(connection, module): tags=asg_tags, health_check_period=health_check_period, health_check_type=health_check_type, - default_cooldown=default_cooldown) + default_cooldown=default_cooldown, + termination_policies=termination_policies) try: connection.create_auto_scaling_group(ag) @@ -783,7 +791,8 @@ def main(): health_check_period=dict(type='int', default=300), health_check_type=dict(default='EC2', choices=['EC2', 'ELB']), default_cooldown=dict(type='int', default=300), - wait_for_instances=dict(type='bool', default=True) + wait_for_instances=dict(type='bool', default=True), + termination_policies=dict(type='list', default=None) ), ) From 2a5f3754e78664d96529f18b9f7bebce6722c629 Mon Sep 17 00:00:00 2001 From: nitzmahone Date: Fri, 21 Aug 2015 13:37:29 -0700 Subject: [PATCH 22/48] added windows facts ansible_lastboot, ansible_uptime_seconds switched OS object to Get-CimInstance since we need a DateTime object for lastbootuptime --- windows/setup.ps1 | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/windows/setup.ps1 b/windows/setup.ps1 index 32b4d865263..bd2f6ac8c76 100644 --- a/windows/setup.ps1 +++ b/windows/setup.ps1 @@ -25,7 +25,7 @@ $result = New-Object psobject @{ changed = $false }; -$win32_os = Get-WmiObject Win32_OperatingSystem +$win32_os = Get-CimInstance Win32_OperatingSystem $osversion = [Environment]::OSVersion $memory = @() $memory += Get-WmiObject win32_Physicalmemory @@ -66,6 +66,9 @@ Set-Attr $result.ansible_facts "ansible_distribution_version" $osversion.Version Set-Attr $result.ansible_facts "ansible_totalmem" $capacity
+Set-Attr $result.ansible_facts "ansible_uptime_seconds" $([System.Convert]::ToInt64($(Get-Date).Subtract($win32_os.lastbootuptime).TotalSeconds)) + $ips = @() Foreach ($ip in $netcfg.IPAddress) { If ($ip) { $ips += $ip } } Set-Attr $result.ansible_facts "ansible_ip_addresses" $ips From 85ddb1b90232dbd68798e9b2d7dafa5689a1d30e Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Thu, 20 Aug 2015 17:32:05 -0400 Subject: [PATCH 23/48] Fixing region requirement regarding euca clusters Fixes ansible/ansible#11023 --- cloud/amazon/ec2.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index 55c45a647f4..c2b57eb7cd3 100644 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -824,7 +824,10 @@ def create_instances(module, ec2, vpc, override_count=None): vpc_id = None if vpc_subnet_id: - vpc_id = vpc.get_all_subnets(subnet_ids=[vpc_subnet_id])[0].vpc_id + if not vpc: + module.fail_json(msg="region must be specified") + else: + vpc_id = vpc.get_all_subnets(subnet_ids=[vpc_subnet_id])[0].vpc_id else: vpc_id = None @@ -1281,7 +1284,7 @@ def main(): except boto.exception.NoAuthHandlerFound, e: module.fail_json(msg = str(e)) else: - module.fail_json(msg="region must be specified") + vpc = None tagged_instances = [] From 1d074d43aa8584f8de01edaee1ffe456a5ab4844 Mon Sep 17 00:00:00 2001 From: Chris Church Date: Sat, 22 Aug 2015 19:01:11 -0400 Subject: [PATCH 24/48] * Update core modules to fix strict mode errors. 
* Also fix creates parameter issue in win_msi as described in https://github.com/ansible/ansible-modules-core/issues/129, slightly different fix from https://github.com/ansible/ansible-modules-core/pull/1482 * Fix setup.ps1 module issue described in https://github.com/ansible/ansible-modules-core/issues/1927 --- windows/setup.ps1 | 2 +- windows/win_feature.ps1 | 48 +++++++------------------------------ windows/win_group.ps1 | 28 ++++++++++------------ windows/win_msi.ps1 | 32 ++++++++++--------------- windows/win_service.ps1 | 17 +++++++------ windows/win_stat.ps1 | 8 +++---- windows/win_user.ps1 | 53 ++++++++++++++--------------------------- 7 files changed, 64 insertions(+), 124 deletions(-) diff --git a/windows/setup.ps1 b/windows/setup.ps1 index bd2f6ac8c76..3e3317d0450 100644 --- a/windows/setup.ps1 +++ b/windows/setup.ps1 @@ -60,7 +60,7 @@ Set-Attr $result.ansible_facts "ansible_hostname" $env:COMPUTERNAME; Set-Attr $result.ansible_facts "ansible_fqdn" "$([System.Net.Dns]::GetHostByName((hostname)).HostName)" Set-Attr $result.ansible_facts "ansible_system" $osversion.Platform.ToString() Set-Attr $result.ansible_facts "ansible_os_family" "Windows" -Set-Attr $result.ansible_facts "ansible_os_name" $win32_os.Name.Split('|')[0] +Set-Attr $result.ansible_facts "ansible_os_name" ($win32_os.Name.Split('|')[0]).Trim() Set-Attr $result.ansible_facts "ansible_distribution" $osversion.VersionString Set-Attr $result.ansible_facts "ansible_distribution_version" $osversion.Version.ToString() diff --git a/windows/win_feature.ps1 b/windows/win_feature.ps1 index 458d942e328..ec6317fb89b 100644 --- a/windows/win_feature.ps1 +++ b/windows/win_feature.ps1 @@ -27,48 +27,18 @@ $result = New-Object PSObject -Property @{ changed = $false } -If ($params.name) { - $name = $params.name -split ',' | % { $_.Trim() } -} -Else { - Fail-Json $result "mising required argument: name" -} - -If ($params.state) { - $state = $params.state.ToString().ToLower() - If (($state -ne 'present') -and 
($state -ne 'absent')) { - Fail-Json $result "state is '$state'; must be 'present' or 'absent'" - } -} -Elseif (!$params.state) { - $state = "present" -} - -If ($params.restart) { - $restart = $params.restart | ConvertTo-Bool -} -Else -{ - $restart = $false -} +$name = Get-Attr $params "name" -failifempty $true +$name = $name -split ',' | % { $_.Trim() } -if ($params.include_sub_features) -{ - $includesubfeatures = $params.include_sub_features | ConvertTo-Bool -} -Else -{ - $includesubfeatures = $false +$state = Get-Attr $params "state" "present" +$state = $state.ToString().ToLower() +If (($state -ne 'present') -and ($state -ne 'absent')) { + Fail-Json $result "state is '$state'; must be 'present' or 'absent'" } -if ($params.include_management_tools) -{ - $includemanagementtools = $params.include_management_tools | ConvertTo-Bool -} -Else -{ - $includemanagementtools = $false -} +$restart = Get-Attr $params "restart" $false | ConvertTo-Bool +$includesubfeatures = Get-Attr $params "include_sub_features" $false | ConvertTo-Bool +$includemanagementtools = Get-Attr $params "include_management_tools" $false | ConvertTo-Bool If ($state -eq "present") { try { diff --git a/windows/win_group.ps1 b/windows/win_group.ps1 index febaf47d014..c3fc920c916 100644 --- a/windows/win_group.ps1 +++ b/windows/win_group.ps1 @@ -24,35 +24,31 @@ $params = Parse-Args $args; $result = New-Object PSObject; Set-Attr $result "changed" $false; -If (-not $params.name.GetType) { - Fail-Json $result "missing required arguments: name" -} +$name = Get-Attr $params "name" -failifempty $true -If ($params.state) { - $state = $params.state.ToString().ToLower() - If (($state -ne "present") -and ($state -ne "absent")) { - Fail-Json $result "state is '$state'; must be 'present' or 'absent'" - } -} -Elseif (-not $params.state) { - $state = "present" +$state = Get-Attr $params "state" "present" +$state = $state.ToString().ToLower() +If (($state -ne "present") -and ($state -ne "absent")) { + Fail-Json $result 
"state is '$state'; must be 'present' or 'absent'" } +$description = Get-Attr $params "description" $null + $adsi = [ADSI]"WinNT://$env:COMPUTERNAME" -$group = $adsi.Children | Where-Object {$_.SchemaClassName -eq 'group' -and $_.Name -eq $params.name } +$group = $adsi.Children | Where-Object {$_.SchemaClassName -eq 'group' -and $_.Name -eq $name } try { If ($state -eq "present") { If (-not $group) { - $group = $adsi.Create("Group", $params.name) + $group = $adsi.Create("Group", $name) $group.SetInfo() Set-Attr $result "changed" $true } - If ($params.description.GetType) { - IF (-not $group.description -or $group.description -ne $params.description) { - $group.description = $params.description + If ($null -ne $description) { + IF (-not $group.description -or $group.description -ne $description) { + $group.description = $description $group.SetInfo() Set-Attr $result "changed" $true } diff --git a/windows/win_msi.ps1 b/windows/win_msi.ps1 index 1c2bc8a3019..f1381e9bf23 100644 --- a/windows/win_msi.ps1 +++ b/windows/win_msi.ps1 @@ -21,36 +21,28 @@ $params = Parse-Args $args; -$result = New-Object psobject; -Set-Attr $result "changed" $false; +$path = Get-Attr $params "path" -failifempty $true +$state = Get-Attr $params "state" "present" +$creates = Get-Attr $params "creates" $false +$extra_args = Get-Attr $params "extra_args" "" -If (-not $params.path.GetType) -{ - Fail-Json $result "missing required arguments: path" -} - -$extra_args = "" -If ($params.extra_args.GetType) -{ - $extra_args = $params.extra_args; -} +$result = New-Object psobject @{ + changed = $false +}; -If ($params.creates.GetType -and $params.state.GetType -and $params.state -ne "absent") +If (($creates -ne $false) -and ($state -ne "absent") -and (Test-Path $creates)) { - If (Test-File $creates) - { - Exit-Json $result; - } + Exit-Json $result; } $logfile = [IO.Path]::GetTempFileName(); -if ($params.state.GetType -and $params.state -eq "absent") +if ($state -eq "absent") { - msiexec.exe /x 
$params.path /qb /l $logfile $extra_args; + msiexec.exe /x $path /qn /l $logfile $extra_args } Else { - msiexec.exe /i $params.path /qb /l $logfile $extra_args; + msiexec.exe /i $path /qn /l $logfile $extra_args } Set-Attr $result "changed" $true; diff --git a/windows/win_service.ps1 b/windows/win_service.ps1 index a70d82a4ef3..4ea4e2697a1 100644 --- a/windows/win_service.ps1 +++ b/windows/win_service.ps1 @@ -24,26 +24,25 @@ $params = Parse-Args $args; $result = New-Object PSObject; Set-Attr $result "changed" $false; -If (-not $params.name.GetType) -{ - Fail-Json $result "missing required arguments: name" -} +$name = Get-Attr $params "name" -failifempty $true +$state = Get-Attr $params "state" $false +$startMode = Get-Attr $params "start_mode" $false -If ($params.state) { - $state = $params.state.ToString().ToLower() +If ($state) { + $state = $state.ToString().ToLower() If (($state -ne 'started') -and ($state -ne 'stopped') -and ($state -ne 'restarted')) { Fail-Json $result "state is '$state'; must be 'started', 'stopped', or 'restarted'" } } -If ($params.start_mode) { - $startMode = $params.start_mode.ToString().ToLower() +If ($startMode) { + $startMode = $startMode.ToString().ToLower() If (($startMode -ne 'auto') -and ($startMode -ne 'manual') -and ($startMode -ne 'disabled')) { Fail-Json $result "start mode is '$startMode'; must be 'auto', 'manual', or 'disabled'" } } -$svcName = $params.name +$svcName = $name $svc = Get-Service -Name $svcName -ErrorAction SilentlyContinue If (-not $svc) { Fail-Json $result "Service '$svcName' not installed" diff --git a/windows/win_stat.ps1 b/windows/win_stat.ps1 index cf8c14a4d49..af9cbd7eca5 100644 --- a/windows/win_stat.ps1 +++ b/windows/win_stat.ps1 @@ -42,14 +42,14 @@ If (Test-Path $path) Set-Attr $result.stat "exists" $TRUE; $info = Get-Item $path; $epoch_date = Get-Date -Date "01/01/1970" - If ($info.Directory) # Only files have the .Directory attribute. 
+ If ($info.PSIsContainer) { - Set-Attr $result.stat "isdir" $FALSE; - Set-Attr $result.stat "size" $info.Length; + Set-Attr $result.stat "isdir" $TRUE; } Else { - Set-Attr $result.stat "isdir" $TRUE; + Set-Attr $result.stat "isdir" $FALSE; + Set-Attr $result.stat "size" $info.Length; } Set-Attr $result.stat "extension" $info.Extension; Set-Attr $result.stat "attributes" $info.Attributes.ToString(); diff --git a/windows/win_user.ps1 b/windows/win_user.ps1 index b7be7e4eea3..ac40ced2cbc 100644 --- a/windows/win_user.ps1 +++ b/windows/win_user.ps1 @@ -16,7 +16,6 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -# WANT_JSON # POWERSHELL_COMMON ######## @@ -55,33 +54,21 @@ $result = New-Object psobject @{ changed = $false }; -If (-not $params.name.GetType) { - Fail-Json $result "missing required arguments: name" -} - -$username = Get-Attr $params "name" +$username = Get-Attr $params "name" -failifempty $true $fullname = Get-Attr $params "fullname" $description = Get-Attr $params "description" $password = Get-Attr $params "password" -If ($params.state) { - $state = $params.state.ToString().ToLower() - If (($state -ne 'present') -and ($state -ne 'absent') -and ($state -ne 'query')) { - Fail-Json $result "state is '$state'; must be 'present', 'absent' or 'query'" - } -} -ElseIf (!$params.state) { - $state = "present" +$state = Get-Attr $params "state" "present" +$state = $state.ToString().ToLower() +If (($state -ne 'present') -and ($state -ne 'absent') -and ($state -ne 'query')) { + Fail-Json $result "state is '$state'; must be 'present', 'absent' or 'query'" } -If ($params.update_password) { - $update_password = $params.update_password.ToString().ToLower() - If (($update_password -ne 'always') -and ($update_password -ne 'on_create')) { - Fail-Json $result "update_password is '$update_password'; must be 'always' or 'on_create'" - } -} -ElseIf (!$params.update_password) { - $update_password = "always" 
+$update_password = Get-Attr $params "update_password" "always" +$update_password = $update_password.ToString().ToLower() +If (($update_password -ne 'always') -and ($update_password -ne 'on_create')) { + Fail-Json $result "update_password is '$update_password'; must be 'always' or 'on_create'" } $password_expired = Get-Attr $params "password_expired" $null @@ -126,14 +113,10 @@ If ($groups -ne $null) { } } -If ($params.groups_action) { - $groups_action = $params.groups_action.ToString().ToLower() - If (($groups_action -ne 'replace') -and ($groups_action -ne 'add') -and ($groups_action -ne 'remove')) { - Fail-Json $result "groups_action is '$groups_action'; must be 'replace', 'add' or 'remove'" - } -} -ElseIf (!$params.groups_action) { - $groups_action = "replace" +$groups_action = Get-Attr $params "groups_action" "replace" +$groups_action = $groups_action.ToString().ToLower() +If (($groups_action -ne 'replace') -and ($groups_action -ne 'add') -and ($groups_action -ne 'remove')) { + Fail-Json $result "groups_action is '$groups_action'; must be 'replace', 'add' or 'remove'" } $user_obj = Get-User $username @@ -141,7 +124,7 @@ $user_obj = Get-User $username If ($state -eq 'present') { # Add or update user try { - If (!$user_obj.GetType) { + If (-not $user_obj -or -not $user_obj.GetType) { $user_obj = $adsi.Create("User", $username) If ($password -ne $null) { $user_obj.SetPassword($password) @@ -200,13 +183,13 @@ If ($state -eq 'present') { If ($result.changed) { $user_obj.SetInfo() } - If ($groups.GetType) { + If ($null -ne $groups) { [string[]]$current_groups = $user_obj.Groups() | ForEach { $_.GetType().InvokeMember("Name", "GetProperty", $null, $_, $null) } If (($groups_action -eq "remove") -or ($groups_action -eq "replace")) { ForEach ($grp in $current_groups) { If ((($groups_action -eq "remove") -and ($groups -contains $grp)) -or (($groups_action -eq "replace") -and ($groups -notcontains $grp))) { $group_obj = $adsi.Children | where { $_.SchemaClassName -eq 
'Group' -and $_.Name -eq $grp } - If ($group_obj.GetType) { + If ($group_obj -and $group_obj.GetType) { $group_obj.Remove($user_obj.Path) $result.changed = $true } @@ -239,7 +222,7 @@ If ($state -eq 'present') { ElseIf ($state -eq 'absent') { # Remove user try { - If ($user_obj.GetType) { + If ($user_obj -and $user_obj.GetType) { $username = $user_obj.Name.Value $adsi.delete("User", $user_obj.Name.Value) $result.changed = $true @@ -252,7 +235,7 @@ ElseIf ($state -eq 'absent') { } try { - If ($user_obj.GetType) { + If ($user_obj -and $user_obj.GetType) { $user_obj.RefreshCache() Set-Attr $result "name" $user_obj.Name[0] Set-Attr $result "fullname" $user_obj.FullName[0] From daf7a0551beefab7047fb36a290c03bd828ec0d3 Mon Sep 17 00:00:00 2001 From: Omri Iluz Date: Sun, 23 Aug 2015 02:29:39 -0700 Subject: [PATCH 25/48] No need for .keys on volumes list Since https://github.com/ansible/ansible-modules-core/commit/c3f92cca210db1f7042bfce1ff90645255f0b49e changed "volumes" to be a list instead of a dictionary, we don't need (and cannot) .keys when appending to set. 
Reported as bug #1957 --- cloud/docker/docker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index e045e2ce1fc..82c39006678 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -1020,7 +1020,7 @@ class DockerManager(object): expected_volume_keys = set((image['ContainerConfig']['Volumes'] or {}).keys()) if self.volumes: - expected_volume_keys.update(self.volumes.keys()) + expected_volume_keys.update(self.volumes) actual_volume_keys = set((container['Config']['Volumes'] or {}).keys()) From 39e7e05a8dc04613bcf8f5d213ea1fe90452dc32 Mon Sep 17 00:00:00 2001 From: Till Backhaus Date: Mon, 24 Aug 2015 20:06:53 +0200 Subject: [PATCH 26/48] Delete dead and broken code --- cloud/amazon/s3.py | 9 --------- cloud/google/gc_storage.py | 9 --------- 2 files changed, 18 deletions(-) diff --git a/cloud/amazon/s3.py b/cloud/amazon/s3.py index 5c97031c09c..e98308bb874 100644 --- a/cloud/amazon/s3.py +++ b/cloud/amazon/s3.py @@ -281,15 +281,6 @@ def create_dirkey(module, s3, bucket, obj): except s3.provider.storage_response_error, e: module.fail_json(msg= str(e)) -def upload_file_check(src): - if os.path.exists(src): - file_exists is True - else: - file_exists is False - if os.path.isdir(src): - module.fail_json(msg="Specifying a directory is not a valid source for upload.", failed=True) - return file_exists - def path_check(path): if os.path.exists(path): return True diff --git a/cloud/google/gc_storage.py b/cloud/google/gc_storage.py index 291d4ca0f4d..37d61b0b268 100644 --- a/cloud/google/gc_storage.py +++ b/cloud/google/gc_storage.py @@ -211,15 +211,6 @@ def create_dirkey(module, gs, bucket, obj): except gs.provider.storage_response_error, e: module.fail_json(msg= str(e)) -def upload_file_check(src): - if os.path.exists(src): - file_exists is True - else: - file_exists is False - if os.path.isdir(src): - module.fail_json(msg="Specifying a directory is not a valid source for upload.", failed=True) 
- return file_exists - def path_check(path): if os.path.exists(path): return True From eb17b6a36c029b92f069b61f38c4bddd848f1be2 Mon Sep 17 00:00:00 2001 From: Marius Gedminas Date: Tue, 25 Aug 2015 19:15:33 +0300 Subject: [PATCH 27/48] apt: check for "0 upgraded" to be at the beginning of the line Fixes #1678. --- packaging/os/apt.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) mode change 100644 => 100755 packaging/os/apt.py diff --git a/packaging/os/apt.py b/packaging/os/apt.py old mode 100644 new mode 100755 index 92b0f2fb8fd..1fd770f710e --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -179,8 +179,8 @@ APT_ENV_VARS = dict( ) DPKG_OPTIONS = 'force-confdef,force-confold' -APT_GET_ZERO = "0 upgraded, 0 newly installed" -APTITUDE_ZERO = "0 packages upgraded, 0 newly installed" +APT_GET_ZERO = "\n0 upgraded, 0 newly installed" +APTITUDE_ZERO = "\n0 packages upgraded, 0 newly installed" APT_LISTS_PATH = "/var/lib/apt/lists" APT_UPDATE_SUCCESS_STAMP_PATH = "/var/lib/apt/periodic/update-success-stamp" From 4ae4331a6bfe716e65aec656bff0c51a78b02a40 Mon Sep 17 00:00:00 2001 From: Marius Gedminas Date: Wed, 26 Aug 2015 08:51:52 +0300 Subject: [PATCH 28/48] user: don't generate SSH keys in check mode Fixes https://github.com/ansible/ansible/issues/11768 Test plan: - (in a Vagrant VM) created a user 'bob' with no ssh key - ran the following playbook in check mode: --- - hosts: trusty tasks: - user: name=bob state=present generate_ssh_key=yes - saw that ansible-playbook reported "changes=1" - saw that /home/bob/.ssh was still absent - ran the playbook for real - saw that /home/bob/.ssh was created - ran the playbook in check mode again - saw that ansible-playbook reported no changes - tried a variation with a different username for a user that didn't exist: ansible-playbook --check worked correctly (no errors, reported "changed") --- system/user.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) mode change 100644 => 100755 system/user.py diff 
--git a/system/user.py b/system/user.py old mode 100644 new mode 100755 index 7e3e4c01cd3..45ce77381ce --- a/system/user.py +++ b/system/user.py @@ -577,11 +577,13 @@ class User(object): def ssh_key_gen(self): info = self.user_info() - if not os.path.exists(info[5]): + if not os.path.exists(info[5]) and not self.module.check_mode: return (1, '', 'User %s home directory does not exist' % self.name) ssh_key_file = self.get_ssh_key_path() ssh_dir = os.path.dirname(ssh_key_file) if not os.path.exists(ssh_dir): + if self.module.check_mode: + return (0, '', '') try: os.mkdir(ssh_dir, 0700) os.chown(ssh_dir, info[2], info[3]) @@ -589,6 +591,8 @@ class User(object): return (1, '', 'Failed to create %s: %s' % (ssh_dir, str(e))) if os.path.exists(ssh_key_file): return (None, 'Key already exists', '') + if self.module.check_mode: + return (0, '', '') cmd = [self.module.get_bin_path('ssh-keygen', True)] cmd.append('-t') cmd.append(self.ssh_type) @@ -2148,6 +2152,7 @@ def main(): # deal with ssh key if user.sshkeygen: + # generate ssh key (note: this function is check mode aware) (rc, out, err) = user.ssh_key_gen() if rc is not None and rc != 0: module.fail_json(name=user.name, msg=err, rc=rc) From 0e42b1708bdfaa4cc5f192c8f68d0d0adb2b03c4 Mon Sep 17 00:00:00 2001 From: Timothy Appnel Date: Wed, 26 Aug 2015 11:53:39 -0400 Subject: [PATCH 29/48] Fixed call to undefined attribute when RDS module timeouts waiting. 
--- cloud/amazon/rds.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/rds.py b/cloud/amazon/rds.py index 9e98f50230b..1755be9b1a1 100644 --- a/cloud/amazon/rds.py +++ b/cloud/amazon/rds.py @@ -610,7 +610,7 @@ def await_resource(conn, resource, status, module): while wait_timeout > time.time() and resource.status != status: time.sleep(5) if wait_timeout <= time.time(): - module.fail_json(msg="Timeout waiting for resource %s" % resource.id) + module.fail_json(msg="Timeout waiting for RDS resource %s" % resource.name) if module.params.get('command') == 'snapshot': # Temporary until all the rds2 commands have their responses parsed if resource.name is None: From 2ba32a8b1048f01e67cad68304440df25ca55975 Mon Sep 17 00:00:00 2001 From: Timothy Appnel Date: Wed, 26 Aug 2015 11:52:15 -0400 Subject: [PATCH 30/48] Clarified and cleaned up grammar of error messages. --- cloud/amazon/rds.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/cloud/amazon/rds.py b/cloud/amazon/rds.py index 1755be9b1a1..d56c4ae12de 100644 --- a/cloud/amazon/rds.py +++ b/cloud/amazon/rds.py @@ -614,12 +614,12 @@ def await_resource(conn, resource, status, module): if module.params.get('command') == 'snapshot': # Temporary until all the rds2 commands have their responses parsed if resource.name is None: - module.fail_json(msg="Problem with snapshot %s" % resource.snapshot) + module.fail_json(msg="There was a problem waiting for RDS snapshot %s" % resource.snapshot) resource = conn.get_db_snapshot(resource.name) else: # Temporary until all the rds2 commands have their responses parsed if resource.name is None: - module.fail_json(msg="Problem with instance %s" % resource.instance) + module.fail_json(msg="There was a problem waiting for RDS instance %s" % resource.instance) resource = conn.get_db_instance(resource.name) if resource is None: break @@ -653,7 +653,7 @@ def create_db_instance(module, conn): module.params.get('username'), 
module.params.get('password'), **params) changed = True except RDSException, e: - module.fail_json(msg="failed to create instance: %s" % e.message) + module.fail_json(msg="Failed to create instance: %s" % e.message) if module.params.get('wait'): resource = await_resource(conn, result, 'available', module) @@ -680,7 +680,7 @@ def replicate_db_instance(module, conn): result = conn.create_db_instance_read_replica(instance_name, source_instance, **params) changed = True except RDSException, e: - module.fail_json(msg="failed to create replica instance: %s " % e.message) + module.fail_json(msg="Failed to create replica instance: %s " % e.message) if module.params.get('wait'): resource = await_resource(conn, result, 'available', module) @@ -719,7 +719,7 @@ def delete_db_instance_or_snapshot(module, conn): else: result = conn.delete_db_snapshot(snapshot) except RDSException, e: - module.fail_json(msg="failed to delete instance: %s" % e.message) + module.fail_json(msg="Failed to delete instance: %s" % e.message) # If we're not waiting for a delete to complete then we're all done # so just return @@ -745,11 +745,11 @@ def facts_db_instance_or_snapshot(module, conn): snapshot = module.params.get('snapshot') if instance_name and snapshot: - module.fail_json(msg="facts must be called with either instance_name or snapshot, not both") + module.fail_json(msg="Facts must be called with either instance_name or snapshot, not both") if instance_name: resource = conn.get_db_instance(instance_name) if not resource: - module.fail_json(msg="DB Instance %s does not exist" % instance_name) + module.fail_json(msg="DB instance %s does not exist" % instance_name) if snapshot: resource = conn.get_db_snapshot(snapshot) if not resource: @@ -1037,7 +1037,7 @@ def main(): region, ec2_url, aws_connect_params = get_aws_connection_info(module) if not region: - module.fail_json(msg="region not specified and unable to determine region from EC2_REGION.") + module.fail_json(msg="Region not specified. 
Unable to determine region from EC2_REGION.") # connect to the rds endpoint if has_rds2: From a4aa29edd96e406385697525c883dbb399b18517 Mon Sep 17 00:00:00 2001 From: Luke Date: Fri, 28 Aug 2015 08:34:05 -0400 Subject: [PATCH 31/48] updated examples When testing with existing example, I received errors about auth block not being ingested. After adding the auth block, name and state with "=" caused syntax errors. --- cloud/openstack/os_network.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/cloud/openstack/os_network.py b/cloud/openstack/os_network.py index 75c431493f6..f911ce71af1 100644 --- a/cloud/openstack/os_network.py +++ b/cloud/openstack/os_network.py @@ -57,8 +57,13 @@ requirements: ["shade"] EXAMPLES = ''' - os_network: - name=t1network - state=present + name: t1network + state: present + auth: + auth_url: https://your_api_url.com:9000/v2.0 + username: user + password: password + project_name: someproject ''' From 40f2ff9fbf71b7a4330b5ddb56a207dec05d5dbf Mon Sep 17 00:00:00 2001 From: Luke Date: Fri, 28 Aug 2015 08:46:45 -0400 Subject: [PATCH 32/48] removed hyphens in module name in examples --- cloud/openstack/os_client_config.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/openstack/os_client_config.py b/cloud/openstack/os_client_config.py index 7128b06ffcb..67c58dfd6ca 100644 --- a/cloud/openstack/os_client_config.py +++ b/cloud/openstack/os_client_config.py @@ -40,12 +40,12 @@ author: "Monty Taylor (@emonty)" EXAMPLES = ''' # Get list of clouds that do not support security groups -- os-client-config: +- os_client_config: - debug: var={{ item }} with_items: "{{ openstack.clouds|rejectattr('secgroup_source', 'none')|list() }}" # Get the information back just about the mordred cloud -- os-client-config: +- os_client_config: clouds: - mordred ''' From 48f522455da3a7232aa2f1c5cc3d71a11d243860 Mon Sep 17 00:00:00 2001 From: Tim Rupp Date: Fri, 28 Aug 2015 20:35:24 -0700 Subject: [PATCH 33/48] Add 
hostname support for Kali linux 2.0 This patch allows the hostname module to detect and set the hostname for a Kali Linux 2.0 installation. Without this patch, the hostname module raises the following error hostname module cannot be used on platform Linux (Kali) Kali is based off of Debian. --- system/hostname.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/system/hostname.py b/system/hostname.py index f986a91f8f3..9e7f6a4ef70 100644 --- a/system/hostname.py +++ b/system/hostname.py @@ -491,6 +491,11 @@ class DebianHostname(Hostname): distribution = 'Debian' strategy_class = DebianStrategy +class KaliHostname(Hostname): + platform = 'Linux' + distribution = 'Kali' + strategy_class = DebianStrategy + class UbuntuHostname(Hostname): platform = 'Linux' distribution = 'Ubuntu' From 5595a9f81d07f840438f52c0560726c87165f622 Mon Sep 17 00:00:00 2001 From: Marius Gedminas Date: Mon, 31 Aug 2015 09:08:35 +0300 Subject: [PATCH 34/48] authorized_key: fix example in documentation 'key=' cannot be pointing to a file name; it needs to be the key itself as a string (or a URL). --- system/authorized_key.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/system/authorized_key.py b/system/authorized_key.py index 376cf4c61dc..361e68cb009 100644 --- a/system/authorized_key.py +++ b/system/authorized_key.py @@ -112,8 +112,10 @@ EXAMPLES = ''' key_options='no-port-forwarding,from="10.0.1.1"' # Set up authorized_keys exclusively with one key -- authorized_key: user=root key=public_keys/doe-jane state=present +- authorized_key: user=root key="{{ item }}" state=present exclusive=yes + with_file: + - public_keys/doe-jane ''' # Makes sure the public key line is present or absent in the user's .ssh/authorized_keys. 
From a18c96882e2713909206c82afc2e7754b206c35d Mon Sep 17 00:00:00 2001 From: Rick Mendes Date: Mon, 31 Aug 2015 09:06:18 -0700 Subject: [PATCH 35/48] using single device_id and enabling release on disassociation --- cloud/amazon/ec2_eip.py | 75 +++++++++++++++++++++++------------------ 1 file changed, 43 insertions(+), 32 deletions(-) diff --git a/cloud/amazon/ec2_eip.py b/cloud/amazon/ec2_eip.py index ae3cd06eaa7..5d6532b3955 100644 --- a/cloud/amazon/ec2_eip.py +++ b/cloud/amazon/ec2_eip.py @@ -22,14 +22,11 @@ description: - This module associates AWS EC2 elastic IP addresses with instances version_added: "1.4" options: - instance_id: + device_id: description: - - The EC2 instance id - required: false - network_interface_id: - description: - - The Elastic Network Interface (ENI) id + - The id of the device for the EIP. Can be an EC2 Instance id or Elastic Network Interface (ENI) id. required: false + aliases: [ instance_id ] version_added: "2.0" public_ip: description: @@ -61,8 +58,15 @@ options: required: false default: false version_added: "1.6" + release_on_disassociation: + description: + - whether or not to automatically release the EIP when it is disassociated + required: false + default: false + version_added: "2.0" extends_documentation_fragment: aws author: "Lorin Hochstein (@lorin) " +author: "Rick Mendes (@rickmendes) " notes: - This module will return C(public_ip) on success, which will contain the public IP address associated with the instance. @@ -70,19 +74,22 @@ notes: the cloud instance is reachable via the new address. Use wait_for and pause to delay further playbook execution until the instance is reachable, if necessary. + - This module returns multiple changed statuses on disassociation or release. + It returns an overall status based on any changes occuring. It also returns + individual changed statuses for disassociation and release. 
''' EXAMPLES = ''' - name: associate an elastic IP with an instance - ec2_eip: instance_id=i-1212f003 ip=93.184.216.119 + ec2_eip: device_id=i-1212f003 ip=93.184.216.119 - name: associate an elastic IP with a device - ec2_eip: network_interface_id=eni-c8ad70f3 ip=93.184.216.119 + ec2_eip: device_id=eni-c8ad70f3 ip=93.184.216.119 - name: disassociate an elastic IP from an instance - ec2_eip: instance_id=i-1212f003 ip=93.184.216.119 state=absent + ec2_eip: device_id=i-1212f003 ip=93.184.216.119 state=absent - name: disassociate an elastic IP with a device - ec2_eip: network_interface_id=eni-c8ad70f3 ip=93.184.216.119 state=absent + ec2_eip: device_id=eni-c8ad70f3 ip=93.184.216.119 state=absent - name: allocate a new elastic IP and associate it with an instance - ec2_eip: instance_id=i-1212f003 + ec2_eip: device_id=i-1212f003 - name: allocate a new elastic IP without associating it to anything action: ec2_eip register: eip @@ -95,7 +102,7 @@ EXAMPLES = ''' ''' group=webserver count=3 register: ec2 - name: associate new elastic IPs with each of the instances - ec2_eip: "instance_id={{ item }}" + ec2_eip: "device_id={{ item }}" with_items: ec2.instance_ids - name: allocate a new elastic IP inside a VPC in us-west-2 ec2_eip: region=us-west-2 in_vpc=yes @@ -292,14 +299,14 @@ def ensure_absent(ec2, domain, address, device_id, check_mode, isinstance=True): def main(): argument_spec = ec2_argument_spec() argument_spec.update(dict( - instance_id=dict(required=False), - network_interface_id=dict(required=False), + device_id=dict(required=False, aliases=['instance_id']), public_ip=dict(required=False, aliases=['ip']), state=dict(required=False, default='present', choices=['present', 'absent']), in_vpc=dict(required=False, type='bool', default=False), reuse_existing_ip_allowed=dict(required=False, type='bool', default=False), + release_on_disassociation=dict(required=False, type='bool', default=False), wait_timeout=dict(default=300), )) @@ -313,42 +320,46 @@ def main(): ec2 = 
ec2_connect(module) - instance_id = module.params.get('instance_id') - network_interface_id = module.params.get('network_interface_id') + device_id = module.params.get('device_id') public_ip = module.params.get('public_ip') state = module.params.get('state') in_vpc = module.params.get('in_vpc') domain = 'vpc' if in_vpc else None reuse_existing_ip_allowed = module.params.get('reuse_existing_ip_allowed') + release_on_disassociation = module.params.get('release_on_disassociation') + + if device_id and device_id.startswith('i-'): + is_instance=True + elif device_id: + is_instance=False try: - if network_interface_id: - address = find_address(ec2, public_ip, network_interface_id, isinstance=False) - elif instance_id: - address = find_address(ec2, public_ip, instance_id) + if device_id: + address = find_address(ec2, public_ip, device_id, isinstance=is_instance) else: address = False if state == 'present': - if instance_id: - result = ensure_present(ec2, domain, address, instance_id, - reuse_existing_ip_allowed, - module.check_mode) - elif network_interface_id: - result = ensure_present(ec2, domain, address, network_interface_id, + if device_id: + result = ensure_present(ec2, domain, address, device_id, reuse_existing_ip_allowed, - module.check_mode, isinstance=False) + module.check_mode, isinstance=is_instance) else: address = allocate_address(ec2, domain, reuse_existing_ip_allowed) result = {'changed': True, 'public_ip': address.public_ip} else: - if network_interface_id: - result = ensure_absent(ec2, domain, address, network_interface_id, module.check_mode, isinstance=False) - elif instance_id: - result = ensure_absent(ec2, domain, address, instance_id, module.check_mode) + if device_id: + disassociated = ensure_absent(ec2, domain, address, device_id, module.check_mode, isinstance=is_instance) + + if release_on_disassociation and disassociated['changed']: + released = release_address(ec2, address, module.check_mode) + result = { 'changed': True, 'disassociated': 
disassociated, 'released': released } + else: + result = { 'changed': disassociated['changed'], 'disassociated': disassociated, 'released': { 'changed': False } } else: address = find_address(ec2, public_ip, None) - result = release_address(ec2, address, module.check_mode) + released = release_address(ec2, address, module.check_mode) + result = { 'changed': released['changed'], 'disassociated': { 'changed': False }, 'released': released } except (boto.exception.EC2ResponseError, EIPException) as e: module.fail_json(msg=str(e)) From 06fc029f73ccd323e355424c016bbcbbc2fb8c80 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 31 Aug 2015 15:09:50 -0700 Subject: [PATCH 36/48] Remove non-ascii quote char --- cloud/amazon/ec2_lc.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_lc.py b/cloud/amazon/ec2_lc.py index e0d7d2c1a64..fa6c64490ad 100644 --- a/cloud/amazon/ec2_lc.py +++ b/cloud/amazon/ec2_lc.py @@ -124,7 +124,7 @@ options: version_added: "2.0" classic_link_vpc_security_groups: description: - - A list of security group id’s with which to associate the ClassicLink VPC instances. + - A list of security group id's with which to associate the ClassicLink VPC instances. required: false default: null version_added: "2.0" From 68ab025dac8ea0f9779f57fde2236bac6ce95084 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 19 Aug 2015 11:18:15 -0400 Subject: [PATCH 37/48] minor doc fixes --- files/assemble.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/files/assemble.py b/files/assemble.py index 73d4214eb9e..a996fe44084 100644 --- a/files/assemble.py +++ b/files/assemble.py @@ -91,9 +91,11 @@ options: validate is passed in via '%s' which must be present as in the sshd example below. The command is passed securely so shell features like expansion and pipes won't work. 
required: false - default: "" + default: null + version_added: "2.0" author: "Stephen Fromm (@sfromm)" -extends_documentation_fragment: files +extends_documentation_fragment: + - files ''' EXAMPLES = ''' @@ -104,7 +106,7 @@ EXAMPLES = ''' - assemble: src=/etc/someapp/fragments dest=/etc/someapp/someapp.conf delimiter='### START FRAGMENT ###' # Copy a new "sshd_config" file into place, after passing validation with sshd -- assemble: src=/etc/ssh/conf.d/ dest=/etc/ssh/sshd_config validate='sshd -t -f %s' +- assemble: src=/etc/ssh/conf.d/ dest=/etc/ssh/sshd_config validate='/usr/sbin/sshd -t -f %s' ''' # =========================================== From bbcfb1092ae22e8520f6241b9da7f99a4f7423cd Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 1 Sep 2015 10:57:37 -0400 Subject: [PATCH 38/48] check systemctl status before show as show will not return antyhing other than rc=0 even when it fails. --- system/service.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/system/service.py b/system/service.py index 8495bec9e24..70ff83517fd 100644 --- a/system/service.py +++ b/system/service.py @@ -520,7 +520,13 @@ class LinuxService(Service): return False def get_systemd_status_dict(self): - (rc, out, err) = self.execute_command("%s show %s" % (self.enable_cmd, self.__systemd_unit,)) + + # Check status first as show will not fail if service does not exist + (rc, out, err) = self.execute_command("%s status '%s'" % (self.enable_cmd, self.__systemd_unit,)) + if rc != 0: + self.module.fail_json(msg='failure %d running systemctl status for %r: %s' % (rc, self.__systemd_unit, err)) + + (rc, out, err) = self.execute_command("%s show '%s'" % (self.enable_cmd, self.__systemd_unit,)) if rc != 0: self.module.fail_json(msg='failure %d running systemctl show for %r: %s' % (rc, self.__systemd_unit, err)) key = None From 3830af652f73b8d896dd0a1d670ebc33e65489b0 Mon Sep 17 00:00:00 2001 From: Chrrrles Paul Date: Tue, 1 Sep 2015 10:47:43 -0500 Subject: [PATCH 39/48] 
Revert "add virtual floppy to VMware guest" --- cloud/vmware/vsphere_guest.py | 62 ----------------------------------- 1 file changed, 62 deletions(-) diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index c152491a8aa..701df22dfba 100644 --- a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -185,9 +185,6 @@ EXAMPLES = ''' vm_cdrom: type: "iso" iso_path: "DatastoreName/cd-image.iso" - vm_floppy: - type: "image" - image_path: "DatastoreName/floppy-image.flp" esxi: datacenter: MyDatacenter hostname: esx001.mydomain.local @@ -379,44 +376,6 @@ def add_cdrom(module, s, config_target, config, devices, default_devs, type="cli devices.append(cd_spec) -def add_floppy(module, s, config_target, config, devices, default_devs, type="image", vm_floppy_image_path=None): - # Add a floppy - # Make sure the datastore exists. - if vm_floppy_image_path: - image_location = vm_floppy_image_path.split('/', 1) - datastore, ds = find_datastore( - module, s, image_location[0], config_target) - image_path = image_location[1] - - floppy_spec = config.new_deviceChange() - floppy_spec.set_element_operation('add') - floppy_ctrl = VI.ns0.VirtualFloppy_Def("floppy_ctrl").pyclass() - - if type == "image": - image = VI.ns0.VirtualFloppyImageBackingInfo_Def("image").pyclass() - ds_ref = image.new_datastore(ds) - ds_ref.set_attribute_type(ds.get_attribute_type()) - image.set_element_datastore(ds_ref) - image.set_element_fileName("%s %s" % (datastore, image_path)) - floppy_ctrl.set_element_backing(image) - floppy_ctrl.set_element_key(3) - floppy_spec.set_element_device(floppy_ctrl) - elif type == "client": - client = VI.ns0.VirtualFloppyRemoteDeviceBackingInfo_Def( - "client").pyclass() - client.set_element_deviceName("/dev/fd0") - floppy_ctrl.set_element_backing(client) - floppy_ctrl.set_element_key(3) - floppy_spec.set_element_device(floppy_ctrl) - else: - s.disconnect() - module.fail_json( - msg="Error adding floppy of type %s to vm spec. 
" - " floppy type can either be image or client" % (type)) - - devices.append(floppy_spec) - - def add_nic(module, s, nfmor, config, devices, nic_type="vmxnet3", network_name="VM Network", network_type="standard"): # add a NIC # Different network card types are: "VirtualE1000", @@ -987,27 +946,6 @@ def create_vm(vsphere_client, module, esxi, resource_pool, cluster_name, guest, # Add a CD-ROM device to the VM. add_cdrom(module, vsphere_client, config_target, config, devices, default_devs, cdrom_type, cdrom_iso_path) - if 'vm_floppy' in vm_hardware: - floppy_image_path = None - floppy_type = None - try: - floppy_type = vm_hardware['vm_floppy']['type'] - except KeyError: - vsphere_client.disconnect() - module.fail_json( - msg="Error on %s definition. floppy type needs to be" - " specified." % vm_hardware['vm_floppy']) - if floppy_type == 'image': - try: - floppy_image_path = vm_hardware['vm_floppy']['image_path'] - except KeyError: - vsphere_client.disconnect() - module.fail_json( - msg="Error on %s definition. floppy image_path needs" - " to be specified." % vm_hardware['vm_floppy']) - # Add a floppy to the VM. 
- add_floppy(module, vsphere_client, config_target, config, devices, - default_devs, floppy_type, floppy_image_path) if vm_nic: for nic in sorted(vm_nic.iterkeys()): try: From 8ebd6cc7cdab4ac4db2bcfd21c58564a3529d466 Mon Sep 17 00:00:00 2001 From: Chrrrles Paul Date: Tue, 1 Sep 2015 10:55:10 -0500 Subject: [PATCH 40/48] Revert "Add 2.0 docs - Revert "add virtual floppy to VMware guest"" --- cloud/vmware/vsphere_guest.py | 62 +++++++++++++++++++++++++++++++++++ 1 file changed, 62 insertions(+) diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index 701df22dfba..c152491a8aa 100644 --- a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -185,6 +185,9 @@ EXAMPLES = ''' vm_cdrom: type: "iso" iso_path: "DatastoreName/cd-image.iso" + vm_floppy: + type: "image" + image_path: "DatastoreName/floppy-image.flp" esxi: datacenter: MyDatacenter hostname: esx001.mydomain.local @@ -376,6 +379,44 @@ def add_cdrom(module, s, config_target, config, devices, default_devs, type="cli devices.append(cd_spec) +def add_floppy(module, s, config_target, config, devices, default_devs, type="image", vm_floppy_image_path=None): + # Add a floppy + # Make sure the datastore exists. 
+ if vm_floppy_image_path: + image_location = vm_floppy_image_path.split('/', 1) + datastore, ds = find_datastore( + module, s, image_location[0], config_target) + image_path = image_location[1] + + floppy_spec = config.new_deviceChange() + floppy_spec.set_element_operation('add') + floppy_ctrl = VI.ns0.VirtualFloppy_Def("floppy_ctrl").pyclass() + + if type == "image": + image = VI.ns0.VirtualFloppyImageBackingInfo_Def("image").pyclass() + ds_ref = image.new_datastore(ds) + ds_ref.set_attribute_type(ds.get_attribute_type()) + image.set_element_datastore(ds_ref) + image.set_element_fileName("%s %s" % (datastore, image_path)) + floppy_ctrl.set_element_backing(image) + floppy_ctrl.set_element_key(3) + floppy_spec.set_element_device(floppy_ctrl) + elif type == "client": + client = VI.ns0.VirtualFloppyRemoteDeviceBackingInfo_Def( + "client").pyclass() + client.set_element_deviceName("/dev/fd0") + floppy_ctrl.set_element_backing(client) + floppy_ctrl.set_element_key(3) + floppy_spec.set_element_device(floppy_ctrl) + else: + s.disconnect() + module.fail_json( + msg="Error adding floppy of type %s to vm spec. " + " floppy type can either be image or client" % (type)) + + devices.append(floppy_spec) + + def add_nic(module, s, nfmor, config, devices, nic_type="vmxnet3", network_name="VM Network", network_type="standard"): # add a NIC # Different network card types are: "VirtualE1000", @@ -946,6 +987,27 @@ def create_vm(vsphere_client, module, esxi, resource_pool, cluster_name, guest, # Add a CD-ROM device to the VM. add_cdrom(module, vsphere_client, config_target, config, devices, default_devs, cdrom_type, cdrom_iso_path) + if 'vm_floppy' in vm_hardware: + floppy_image_path = None + floppy_type = None + try: + floppy_type = vm_hardware['vm_floppy']['type'] + except KeyError: + vsphere_client.disconnect() + module.fail_json( + msg="Error on %s definition. floppy type needs to be" + " specified." 
% vm_hardware['vm_floppy']) + if floppy_type == 'image': + try: + floppy_image_path = vm_hardware['vm_floppy']['image_path'] + except KeyError: + vsphere_client.disconnect() + module.fail_json( + msg="Error on %s definition. floppy image_path needs" + " to be specified." % vm_hardware['vm_floppy']) + # Add a floppy to the VM. + add_floppy(module, vsphere_client, config_target, config, devices, + default_devs, floppy_type, floppy_image_path) if vm_nic: for nic in sorted(vm_nic.iterkeys()): try: From 6a40e8b4545822bdaf0e33c44e680d53bd8f7175 Mon Sep 17 00:00:00 2001 From: Charles Paul Date: Tue, 1 Sep 2015 14:53:11 -0500 Subject: [PATCH 41/48] vm_floppy 2.0 support --- cloud/vmware/vsphere_guest.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index c152491a8aa..41da954ac32 100644 --- a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -152,6 +152,7 @@ EXAMPLES = ''' # Returns changed = True and a adds ansible_facts from the new VM # State will set the power status of a guest upon creation. Use powered_on to create and boot. # Options ['state', 'vm_extra_config', 'vm_disk', 'vm_nic', 'vm_hardware', 'esxi'] are required together +# Note: vm_floppy support added in 2.0 - vsphere_guest: vcenter_hostname: vcenter.mydomain.local From c54f875fdd7f26a2fce6cd2986b096a283452b2e Mon Sep 17 00:00:00 2001 From: James Martin Date: Wed, 2 Sep 2015 10:18:38 -0500 Subject: [PATCH 42/48] Adds sanity check to make sure nics is a list. 
--- cloud/openstack/os_server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/openstack/os_server.py b/cloud/openstack/os_server.py index 959f39880f8..90cc7282d04 100644 --- a/cloud/openstack/os_server.py +++ b/cloud/openstack/os_server.py @@ -392,7 +392,7 @@ def main(): flavor_include = dict(default=None), key_name = dict(default=None), security_groups = dict(default='default'), - nics = dict(default=[]), + nics = dict(default=[], type='list'), meta = dict(default=None), userdata = dict(default=None), config_drive = dict(default=False, type='bool'), From e278f285aa6f61e45416be28b1e689b4d7607196 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 2 Sep 2015 17:09:53 -0400 Subject: [PATCH 43/48] partially reverted previous change to deal with systemctl show status not returning errors on missing service. Now it looks for not-found key instead of running status which does return error codes when service is present but in diff states. fixes #12216 --- system/service.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/system/service.py b/system/service.py index 70ff83517fd..4255ecb83ab 100644 --- a/system/service.py +++ b/system/service.py @@ -522,13 +522,12 @@ class LinuxService(Service): def get_systemd_status_dict(self): # Check status first as show will not fail if service does not exist - (rc, out, err) = self.execute_command("%s status '%s'" % (self.enable_cmd, self.__systemd_unit,)) - if rc != 0: - self.module.fail_json(msg='failure %d running systemctl status for %r: %s' % (rc, self.__systemd_unit, err)) - (rc, out, err) = self.execute_command("%s show '%s'" % (self.enable_cmd, self.__systemd_unit,)) if rc != 0: self.module.fail_json(msg='failure %d running systemctl show for %r: %s' % (rc, self.__systemd_unit, err)) + elif 'LoadState=not-found' in out: + self.module.fail_json(msg='systemd could not find the requested service "%r": %s' % (self.__systemd_unit, err)) + key = None value_buffer = [] status_dict = 
{} From 2520627fe7d487a08e077a01bb1251f3757f0515 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 3 Sep 2015 08:46:35 -0700 Subject: [PATCH 44/48] Make sure listener ports are ints. May fix #1984 --- cloud/amazon/ec2_elb_lb.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/amazon/ec2_elb_lb.py b/cloud/amazon/ec2_elb_lb.py index 3d54f994436..8c739e1a2b2 100644 --- a/cloud/amazon/ec2_elb_lb.py +++ b/cloud/amazon/ec2_elb_lb.py @@ -573,8 +573,8 @@ class ElbManager(object): # N.B. string manipulations on protocols below (str(), upper()) is to # ensure format matches output from ELB API listener_list = [ - listener['load_balancer_port'], - listener['instance_port'], + int(listener['load_balancer_port']), + int(listener['instance_port']), str(listener['protocol'].upper()), ] From 1f358f349b73e008801f32cf046b5533abbefc5c Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 3 Sep 2015 15:39:18 -0700 Subject: [PATCH 45/48] We had two separate methods trying to do the same thing but neither one was complete. This merges them so that all of the options get parsed and applied. 
--- cloud/docker/docker.py | 89 ++++++++++++------------------------------ 1 file changed, 26 insertions(+), 63 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 82c39006678..99ede1b564f 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -801,7 +801,8 @@ class DockerManager(object): optionals = {} for optional_param in ('dns', 'volumes_from', 'restart_policy', - 'restart_policy_retry', 'pid'): + 'restart_policy_retry', 'pid', 'extra_hosts', 'log_driver', + 'cap_add', 'cap_drop'): optionals[optional_param] = self.module.params.get(optional_param) if optionals['dns'] is not None: @@ -818,13 +819,35 @@ class DockerManager(object): if params['restart_policy']['Name'] == 'on-failure': params['restart_policy']['MaximumRetryCount'] = optionals['restart_policy_retry'] + # docker_py only accepts 'host' or None + if 'pid' in optionals and not optionals['pid']: + optionals['pid'] = None + if optionals['pid'] is not None: self.ensure_capability('pid') params['pid_mode'] = optionals['pid'] + if optionals['extra_hosts'] is not None: + self.ensure_capability('extra_hosts') + params['extra_hosts'] = optionals['extra_hosts'] + + if optionals['log_driver'] is not None: + self.ensure_capability('log_driver') + log_config = docker.utils.LogConfig(type=docker.utils.LogConfig.types.JSON) + log_config.type = optionals['log_driver'] + params['log_config'] = log_config + + if optionals['cap_add'] is not None: + self.ensure_capability('cap_add') + params['cap_add'] = optionals['cap_add'] + + if optionals['cap_drop'] is not None: + self.ensure_capability('cap_drop') + params['cap_drop'] = optionals['cap_drop'] + return params - def get_host_config(self): + def create_host_config(self): """ Create HostConfig object """ @@ -1340,65 +1363,6 @@ class DockerManager(object): except Exception as e: self.module.fail_json(msg="Failed to pull the specified image: %s" % resource, error=repr(e)) - def create_host_config(self): - params = { - 'lxc_conf': 
self.lxc_conf, - 'binds': self.binds, - 'port_bindings': self.port_bindings, - 'publish_all_ports': self.module.params.get('publish_all_ports'), - 'privileged': self.module.params.get('privileged'), - 'links': self.links, - 'network_mode': self.module.params.get('net'), - } - - optionals = {} - for optional_param in ('dns', 'volumes_from', 'restart_policy', - 'restart_policy_retry', 'pid', 'extra_hosts', 'log_driver', - 'cap_add', 'cap_drop'): - optionals[optional_param] = self.module.params.get(optional_param) - - if optionals['dns'] is not None: - self.ensure_capability('dns') - params['dns'] = optionals['dns'] - - if optionals['volumes_from'] is not None: - self.ensure_capability('volumes_from') - params['volumes_from'] = optionals['volumes_from'] - - if optionals['restart_policy'] is not None: - self.ensure_capability('restart_policy') - params['restart_policy'] = { 'Name': optionals['restart_policy'] } - if params['restart_policy']['Name'] == 'on-failure': - params['restart_policy']['MaximumRetryCount'] = optionals['restart_policy_retry'] - - # docker_py only accepts 'host' or None - if 'pid' in optionals and not optionals['pid']: - optionals['pid'] = None - - if optionals['pid'] is not None: - self.ensure_capability('pid') - params['pid_mode'] = optionals['pid'] - - if optionals['extra_hosts'] is not None: - self.ensure_capability('extra_hosts') - params['extra_hosts'] = optionals['extra_hosts'] - - if optionals['log_driver'] is not None: - self.ensure_capability('log_driver') - log_config = docker.utils.LogConfig(type=docker.utils.LogConfig.types.JSON) - log_config.type = optionals['log_driver'] - params['log_config'] = log_config - - if optionals['cap_add'] is not None: - self.ensure_capability('cap_add') - params['cap_add'] = optionals['cap_add'] - - if optionals['cap_drop'] is not None: - self.ensure_capability('cap_drop') - params['cap_drop'] = optionals['cap_drop'] - - return docker.utils.create_host_config(**params) - def create_containers(self, 
count=1): try: mem_limit = _human_to_bytes(self.module.params.get('memory_limit')) @@ -1418,11 +1382,10 @@ class DockerManager(object): 'stdin_open': self.module.params.get('stdin_open'), 'tty': self.module.params.get('tty'), 'cpuset': self.module.params.get('cpu_set'), - 'host_config': self.create_host_config(), 'user': self.module.params.get('docker_user'), } if self.ensure_capability('host_config', fail=False): - params['host_config'] = self.get_host_config() + params['host_config'] = self.create_host_config() #For v1.19 API and above use HostConfig, otherwise use Config if api_version < 1.19: From 6e5a832dc28e1de72f296f7fe2b9bda294bc5b50 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 4 Sep 2015 15:59:19 -0700 Subject: [PATCH 46/48] Fix read-only usage to depend on the docker-py and docker server version --- cloud/docker/docker.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 99ede1b564f..f236f1b52fb 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -268,7 +268,7 @@ options: read_only: description: - Mount the container's root filesystem as read only - default: false + default: null aliases: [] version_added: "2.0" restart_policy: @@ -796,13 +796,12 @@ class DockerManager(object): 'privileged': self.module.params.get('privileged'), 'links': self.links, 'network_mode': self.module.params.get('net'), - 'read_only': self.module.params.get('read_only'), } optionals = {} for optional_param in ('dns', 'volumes_from', 'restart_policy', 'restart_policy_retry', 'pid', 'extra_hosts', 'log_driver', - 'cap_add', 'cap_drop'): + 'cap_add', 'cap_drop', 'read_only'): optionals[optional_param] = self.module.params.get(optional_param) if optionals['dns'] is not None: @@ -845,6 +844,10 @@ class DockerManager(object): self.ensure_capability('cap_drop') params['cap_drop'] = optionals['cap_drop'] + if optionals['read_only'] is not None: + 
self.ensure_capability('read_only') + params['read_only'] = optionals['read_only'] + return params def create_host_config(self): @@ -1627,7 +1630,7 @@ def main(): cpu_set = dict(default=None), cap_add = dict(default=None, type='list'), cap_drop = dict(default=None, type='list'), - read_only = dict(default=False, type='bool'), + read_only = dict(default=None, type='bool'), ), required_together = ( ['tls_client_cert', 'tls_client_key'], From 2ae37e7845c6acfb03cd0dadf2225bfaf82dc8e6 Mon Sep 17 00:00:00 2001 From: Victor Costan Date: Fri, 4 Sep 2015 20:46:26 -0400 Subject: [PATCH 47/48] os_server: nice error when flavor not found When we can't find the VM flavor that the user requests, this change replaces the non-descript stack trace with a clear error message. --- cloud/openstack/os_server.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cloud/openstack/os_server.py b/cloud/openstack/os_server.py index 90cc7282d04..1fe1a7b65a3 100644 --- a/cloud/openstack/os_server.py +++ b/cloud/openstack/os_server.py @@ -287,8 +287,12 @@ def _create_server(module, cloud): if flavor: flavor_dict = cloud.get_flavor(flavor) + if not flavor_dict: + module.fail_json(msg="Could not find flavor %s" % flavor) else: flavor_dict = cloud.get_flavor_by_ram(flavor_ram, flavor_include) + if not flavor_dict: + module.fail_json(msg="Could not find any matching flavor") nics = _network_args(module, cloud) From 34655e8e29c089e33cd505198e1356b893dfff32 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Sat, 5 Sep 2015 08:56:02 -0700 Subject: [PATCH 48/48] correct documentation formatting --- cloud/amazon/ec2_asg.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_asg.py b/cloud/amazon/ec2_asg.py index e67d2a07d39..db6cd061480 100644 --- a/cloud/amazon/ec2_asg.py +++ b/cloud/amazon/ec2_asg.py @@ -130,7 +130,7 @@ options: description: - An ordered list of criteria used for selecting instances to be removed from the Auto Scaling group when reducing capacity. 
required: false - default: Default. E.g.: When used to create a new autoscaling group, the “Default” value is used. When used to change an existent autoscaling group, the current termination policies are mantained + default: Default. Eg, when used to create a new autoscaling group, the “Default” value is used. When used to change an existent autoscaling group, the current termination policies are mantained choices: ['OldestInstance', 'NewestInstance', 'OldestLaunchConfiguration', 'ClosestToNextInstanceHour', 'Default'] version_added: "2.0" extends_documentation_fragment: aws