From 7f9305b24e63b411a00a2e036c2f8a694d7a327b Mon Sep 17 00:00:00 2001 From: anatoly techtonik Date: Mon, 29 Sep 2014 17:59:23 +0300 Subject: [PATCH 001/250] files.stat: Expose path in returned result This is needed to apply subsequent operation on tested path --- files/stat.py | 1 + 1 file changed, 1 insertion(+) diff --git a/files/stat.py b/files/stat.py index 8c717a395c4..2a9189e532e 100644 --- a/files/stat.py +++ b/files/stat.py @@ -99,6 +99,7 @@ def main(): # back to ansible d = { 'exists' : True, + 'path' : path, 'mode' : "%04o" % S_IMODE(mode), 'isdir' : S_ISDIR(mode), 'ischr' : S_ISCHR(mode), From e26cbb6acec888601bd3446fe4db03f376d904b1 Mon Sep 17 00:00:00 2001 From: Tongliang Liu Date: Mon, 29 Sep 2014 13:25:46 -0700 Subject: [PATCH 002/250] Added support of returning owner's group name in stat module --- files/stat.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/files/stat.py b/files/stat.py index 8c717a395c4..36ef7145783 100644 --- a/files/stat.py +++ b/files/stat.py @@ -66,6 +66,7 @@ import os import sys from stat import * import pwd +import grp def main(): module = AnsibleModule( @@ -140,6 +141,9 @@ def main(): pw = pwd.getpwuid(st.st_uid) d['pw_name'] = pw.pw_name + + grp_info = grp.getgrgid(pw.pw_gid) + d['gr_name'] = grp_info.gr_name except: pass From 2206d4b1e0d71a610102a415ca00180aca237e60 Mon Sep 17 00:00:00 2001 From: Joe Adams Date: Mon, 29 Sep 2014 17:02:08 -0400 Subject: [PATCH 003/250] Added note to files/synchronize module about synchronizing two directories on the same host --- files/synchronize.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/files/synchronize.py b/files/synchronize.py index 842dd863849..73108dbd3ba 100644 --- a/files/synchronize.py +++ b/files/synchronize.py @@ -180,7 +180,9 @@ local_action: synchronize src=some/relative/path dest=/some/absolute/path pull mode synchronize: mode=pull src=some/relative/path dest=/some/absolute/path -# Synchronization of src on delegate host to dest on the 
current inventory host +# Synchronization of src on delegate host to dest on the current inventory host. +# If delegate_to is set to the current inventory host, this can be used to synchronize +# two directories on that host. synchronize: > src=some/relative/path dest=/some/absolute/path delegate_to: delegate.host From b3e84f2dd42afbc35cd0ed349320d5fb4f3ab2ec Mon Sep 17 00:00:00 2001 From: Phillip Holmes Date: Mon, 29 Sep 2014 16:20:25 -0500 Subject: [PATCH 004/250] Route53 fix - forcing zone_in, record_in to lower case It turns out the Route53 API cares if the zone and record specified in the playbook are lower case or not when deleting a record. If you use a variable to name your servers and care about case, using that same proper case name will cause Route53 DNS delete requests to fail. The change requested adds .lower() to the module.params.get for both zone and record when used in the underlying code. Both zone and record are mandatory variables, and as such a more complicated implementation is not needed, as they must always be specified when using this module (see lines 169 and 170 for the required state). If you use lowercase names (or don't use a name variable and share it between a tag and DNS entries) then you will never see this issue. Tested/Confirmed as an issue in Ansible 1.6.6 and above. 
--- cloud/route53.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/route53.py b/cloud/route53.py index b3878e0580e..d0723a3d0e6 100644 --- a/cloud/route53.py +++ b/cloud/route53.py @@ -178,9 +178,9 @@ def main(): module = AnsibleModule(argument_spec=argument_spec) command_in = module.params.get('command') - zone_in = module.params.get('zone') + zone_in = module.params.get('zone').tolower ttl_in = module.params.get('ttl') - record_in = module.params.get('record') + record_in = module.params.get('record').tolower type_in = module.params.get('type') value_in = module.params.get('value') retry_interval_in = module.params.get('retry_interval') From 7402827950b873d35ff7d25174e57d71bff5c598 Mon Sep 17 00:00:00 2001 From: Phillip Holmes Date: Mon, 29 Sep 2014 16:23:41 -0500 Subject: [PATCH 005/250] Route53 fix - forcing zone_in, record_in to lower case Fixed the .tolower to .lower() for correct syntax (copied change from older notes). --- cloud/route53.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/route53.py b/cloud/route53.py index d0723a3d0e6..0d7fdcbade5 100644 --- a/cloud/route53.py +++ b/cloud/route53.py @@ -178,9 +178,9 @@ def main(): module = AnsibleModule(argument_spec=argument_spec) command_in = module.params.get('command') - zone_in = module.params.get('zone').tolower + zone_in = module.params.get('zone').lower() ttl_in = module.params.get('ttl') - record_in = module.params.get('record').tolower + record_in = module.params.get('record').lower() type_in = module.params.get('type') value_in = module.params.get('value') retry_interval_in = module.params.get('retry_interval') From 9cbd4d0faddf4f1f692f7a7ee7189c8bb1f901e6 Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Mon, 29 Sep 2014 18:02:42 -0400 Subject: [PATCH 006/250] Fix old ticket #9092 where a playbook can enter in recursion This can be tested with this command : ansible -c local -m copy -a 'src=/etc/group dest=foo/' all This is a corner 
case of the algorithm used to find how we should copy recursively a folder, and this commit detect it and avoid it. Check https://github.com/ansible/ansible/issues/9092 for the story --- files/copy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/copy.py b/files/copy.py index eff46dae982..203a36b5b7a 100644 --- a/files/copy.py +++ b/files/copy.py @@ -176,7 +176,7 @@ def main(): if original_basename and dest.endswith("/"): dest = os.path.join(dest, original_basename) dirname = os.path.dirname(dest) - if not os.path.exists(dirname): + if not os.path.exists(dirname) and '/' in dirname: (pre_existing_dir, new_directory_list) = split_pre_existing_dir(dirname) os.makedirs(dirname) directory_args = module.load_file_common_arguments(module.params) From 37d99031693cb9672d4337bd8c2f1935531ced94 Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Mon, 29 Sep 2014 18:07:41 -0400 Subject: [PATCH 007/250] Add hostname to generated user SSH key comment The default is not very useful to sort between different keys and user. Adding the hostname in the comment permit to later sort them if you start to reuse the key and set them in different servers. See https://github.com/ansible/ansible/pull/7420 for the rational. --- system/user.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/system/user.py b/system/user.py index 551384a7a67..95e48b4a21a 100644 --- a/system/user.py +++ b/system/user.py @@ -159,7 +159,7 @@ options: - Optionally specify the SSH key filename. ssh_key_comment: required: false - default: ansible-generated + default: ansible-generated on $HOSTNAME version_added: "0.9" description: - Optionally define the comment for the SSH key. 
@@ -198,6 +198,7 @@ import pwd import grp import syslog import platform +import socket try: import spwd @@ -1453,7 +1454,7 @@ def main(): 'bits': '2048', 'type': 'rsa', 'passphrase': None, - 'comment': 'ansible-generated' + 'comment': 'ansible-generated on %s' % socket.gethostname() } module = AnsibleModule( argument_spec = dict( From 82af0743820fa901423132cd2afa8ee1358315ce Mon Sep 17 00:00:00 2001 From: kustodian Date: Tue, 30 Sep 2014 00:33:55 +0200 Subject: [PATCH 008/250] Set selinux state to 'permissive' for state=disabled --- system/selinux.py | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/system/selinux.py b/system/selinux.py index 53e53d1d49c..908bbc250ec 100644 --- a/system/selinux.py +++ b/system/selinux.py @@ -174,14 +174,19 @@ def main(): if (state != runtime_state): if module.check_mode: module.exit_json(changed=True) - if (state == 'disabled'): - msgs.append('state change will take effect next reboot') - else: - if (runtime_enabled): + if (runtime_enabled): + if (state == 'disabled'): + if (runtime_state != 'permissive'): + # Temporarily set state to permissive + set_state('permissive') + msgs.append('runtime state temporarily changed from \'%s\' to \'permissive\', state change will take effect next reboot' % (runtime_state)) + else: + msgs.append('state change will take effect next reboot') + else: set_state(state) msgs.append('runtime state changed from \'%s\' to \'%s\'' % (runtime_state, state)) - else: - msgs.append('state change will take effect next reboot') + else: + msgs.append('state change will take effect next reboot') changed=True if (state != config_state): From 30aae62e255533da6689c2895011a3e991b6222c Mon Sep 17 00:00:00 2001 From: Chad Nelson Date: Fri, 20 Jun 2014 23:20:50 -0400 Subject: [PATCH 009/250] Give option on how git tracks submodules. Allows user to decide if git submodule should track branches/tags or track commit hashes defined in the superproject. 
Add track_branches parameter to the git module. Defaults to track branches behavior. --- source_control/git.py | 30 +++++++++++++++++++++--------- 1 file changed, 21 insertions(+), 9 deletions(-) diff --git a/source_control/git.py b/source_control/git.py index a5d94e3dbbe..c44284dd591 100644 --- a/source_control/git.py +++ b/source_control/git.py @@ -128,6 +128,16 @@ options: description: - if C(no), repository will be cloned without the --recursive option, skipping sub-modules. + + track_branches: + required: false + default: "yes" + choices: ["yes", "no"] + version_added: "1.7" + description: + - if C(no), repository will be cloned without the --recursive + option, allowing submodules to be tracked by commit hash + instead of branch name. notes: - "If the task seems to be hanging, first verify remote host is in C(known_hosts). SSH will prompt user to authorize the first contact with a remote host. To avoid this prompt, @@ -399,7 +409,7 @@ def get_head_branch(git_path, module, dest, remote, bare=False): f.close() return branch -def fetch(git_path, module, repo, dest, version, remote, bare): +def fetch(git_path, module, repo, dest, version, remote, bare, track_branches): ''' updates repo from remote sources ''' (rc, out0, err0) = module.run_command([git_path, 'remote', 'set-url', remote, repo], cwd=dest) if rc != 0: @@ -417,10 +427,10 @@ def fetch(git_path, module, repo, dest, version, remote, bare): (rc, out2, err2) = module.run_command("%s fetch --tags %s" % (git_path, remote), cwd=dest) if rc != 0: module.fail_json(msg="Failed to download remote objects and refs") - (rc, out3, err3) = submodule_update(git_path, module, dest) + (rc, out3, err3) = submodule_update(git_path, module, dest, track_branches ) return (rc, out1 + out2 + out3, err1 + err2 + err3) -def submodule_update(git_path, module, dest): +def submodule_update(git_path, module, dest, track_branches): ''' init and update any submodules ''' # get the valid submodule params @@ -431,7 +441,7 @@ def 
submodule_update(git_path, module, dest): return (0, '', '') cmd = [ git_path, 'submodule', 'sync' ] (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest) - if 'remote' in params: + if 'remote' in params and track_branches: cmd = [ git_path, 'submodule', 'update', '--init', '--recursive' ,'--remote' ] else: cmd = [ git_path, 'submodule', 'update', '--init', '--recursive' ] @@ -440,8 +450,8 @@ def submodule_update(git_path, module, dest): module.fail_json(msg="Failed to init/update submodules: %s" % out + err) return (rc, out, err) -def switch_version(git_path, module, dest, remote, version, recursive): - ''' once pulled, switch to a particular SHA, tag, or branch ''' + +def switch_version(git_path, module, dest, remote, version, recursive, track_branches): cmd = '' if version != 'HEAD': if is_remote_branch(git_path, module, dest, remote, version): @@ -467,7 +477,7 @@ def switch_version(git_path, module, dest, remote, version, recursive): else: module.fail_json(msg="Failed to checkout branch %s" % (branch)) if recursive: - (rc, out2, err2) = submodule_update(git_path, module, dest) + (rc, out2, err2) = submodule_update(git_path, module, dest, track_branches) out1 += out2 err1 += err1 return (rc, out1, err1) @@ -491,6 +501,7 @@ def main(): executable=dict(default=None), bare=dict(default='no', type='bool'), recursive=dict(default='yes', type='bool'), + track_branches=dict(default='yes', type='bool'), ), supports_check_mode=True ) @@ -535,6 +546,7 @@ def main(): add_git_host_key(module, repo, accept_hostkey=module.params['accept_hostkey']) recursive = module.params['recursive'] + track_branches = module.params['track_branches'] rc, out, err, status = (0, None, None, None) @@ -580,12 +592,12 @@ def main(): module.exit_json(changed=False, before=before, after=remote_head) if module.check_mode: module.exit_json(changed=True, before=before, after=remote_head) - fetch(git_path, module, repo, dest, version, remote, bare) + fetch(git_path, module, repo, dest, 
version, remote, bare, track_branches) # switch to version specified regardless of whether # we cloned or pulled if not bare: - switch_version(git_path, module, dest, remote, version, recursive) + switch_version(git_path, module, dest, remote, version, recursive, track_branches) # determine if we changed anything after = get_version(module, git_path, dest) From 37ed9b19ef9eed3e3bed94795a63429176e1d604 Mon Sep 17 00:00:00 2001 From: Chad Nelson Date: Tue, 5 Aug 2014 06:53:55 -0400 Subject: [PATCH 010/250] Change option name to track_submodule_branches. Update documentation to reflect the actual effect of the option. --- source_control/git.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/source_control/git.py b/source_control/git.py index c44284dd591..e1e11571623 100644 --- a/source_control/git.py +++ b/source_control/git.py @@ -129,13 +129,13 @@ options: - if C(no), repository will be cloned without the --recursive option, skipping sub-modules. - track_branches: + track_submodule_branches: required: false default: "yes" choices: ["yes", "no"] version_added: "1.7" description: - - if C(no), repository will be cloned without the --recursive + - if C(no), submodules will be updated without the --remote option, allowing submodules to be tracked by commit hash instead of branch name. 
notes: @@ -409,7 +409,7 @@ def get_head_branch(git_path, module, dest, remote, bare=False): f.close() return branch -def fetch(git_path, module, repo, dest, version, remote, bare, track_branches): +def fetch(git_path, module, repo, dest, version, remote, bare, track_submodule_branches): ''' updates repo from remote sources ''' (rc, out0, err0) = module.run_command([git_path, 'remote', 'set-url', remote, repo], cwd=dest) if rc != 0: @@ -427,10 +427,10 @@ def fetch(git_path, module, repo, dest, version, remote, bare, track_branches): (rc, out2, err2) = module.run_command("%s fetch --tags %s" % (git_path, remote), cwd=dest) if rc != 0: module.fail_json(msg="Failed to download remote objects and refs") - (rc, out3, err3) = submodule_update(git_path, module, dest, track_branches ) + (rc, out3, err3) = submodule_update(git_path, module, dest, track_submodule_branches ) return (rc, out1 + out2 + out3, err1 + err2 + err3) -def submodule_update(git_path, module, dest, track_branches): +def submodule_update(git_path, module, dest, track_submodule_branches): ''' init and update any submodules ''' # get the valid submodule params @@ -441,7 +441,7 @@ def submodule_update(git_path, module, dest, track_branches): return (0, '', '') cmd = [ git_path, 'submodule', 'sync' ] (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest) - if 'remote' in params and track_branches: + if 'remote' in params and track_submodule_branches: cmd = [ git_path, 'submodule', 'update', '--init', '--recursive' ,'--remote' ] else: cmd = [ git_path, 'submodule', 'update', '--init', '--recursive' ] @@ -451,7 +451,7 @@ def submodule_update(git_path, module, dest, track_branches): return (rc, out, err) -def switch_version(git_path, module, dest, remote, version, recursive, track_branches): +def switch_version(git_path, module, dest, remote, version, recursive, track_submodule_branches): cmd = '' if version != 'HEAD': if is_remote_branch(git_path, module, dest, remote, version): @@ -477,7 +477,7 @@ 
def switch_version(git_path, module, dest, remote, version, recursive, track_bra else: module.fail_json(msg="Failed to checkout branch %s" % (branch)) if recursive: - (rc, out2, err2) = submodule_update(git_path, module, dest, track_branches) + (rc, out2, err2) = submodule_update(git_path, module, dest, track_submodule_branches) out1 += out2 err1 += err1 return (rc, out1, err1) @@ -501,7 +501,7 @@ def main(): executable=dict(default=None), bare=dict(default='no', type='bool'), recursive=dict(default='yes', type='bool'), - track_branches=dict(default='yes', type='bool'), + track_submodule_branches=dict(default='yes', type='bool'), ), supports_check_mode=True ) @@ -546,7 +546,7 @@ def main(): add_git_host_key(module, repo, accept_hostkey=module.params['accept_hostkey']) recursive = module.params['recursive'] - track_branches = module.params['track_branches'] + track_submodule_branches = module.params['track_submodule_branches'] rc, out, err, status = (0, None, None, None) @@ -592,12 +592,12 @@ def main(): module.exit_json(changed=False, before=before, after=remote_head) if module.check_mode: module.exit_json(changed=True, before=before, after=remote_head) - fetch(git_path, module, repo, dest, version, remote, bare, track_branches) + fetch(git_path, module, repo, dest, version, remote, bare, track_submodule_branches) # switch to version specified regardless of whether # we cloned or pulled if not bare: - switch_version(git_path, module, dest, remote, version, recursive, track_branches) + switch_version(git_path, module, dest, remote, version, recursive, track_submodule_branches) # determine if we changed anything after = get_version(module, git_path, dest) From 2cecd799c24164e2f26829c26e234c5d8af1ba1d Mon Sep 17 00:00:00 2001 From: sterutkb Date: Tue, 30 Sep 2014 11:20:59 +0200 Subject: [PATCH 011/250] Added support for deploying virtual machine from a virtual template --- cloud/vsphere_guest.py | 106 +++++++++++++++++++++++++++++++++++++++-- 1 file changed, 101 
insertions(+), 5 deletions(-) diff --git a/cloud/vsphere_guest.py b/cloud/vsphere_guest.py index a91a8199dda..ebaf1b4d6a9 100644 --- a/cloud/vsphere_guest.py +++ b/cloud/vsphere_guest.py @@ -67,7 +67,16 @@ options: description: - Indicate desired state of the vm. default: present - choices: ['present', 'powered_on', 'absent', 'powered_off', 'restarted', 'reconfigured'] + choices: ['present', 'powered_on', 'absent', 'powered_on', 'restarted', 'reconfigured'] + from_template: + description: + - Specifies if the VM should be deployed from a template (cannot be ran with state) + default: no + choices: ['yes', 'no'] + template_src: + description: + - Name of the source template to deploy from + default: None vm_disk: description: - A key, value list of disks and their sizes and which datastore to keep it in. @@ -181,6 +190,18 @@ EXAMPLES = ''' datacenter: MyDatacenter hostname: esx001.mydomain.local +# Deploy a guest from a template +# No reconfiguration of the destination guest is done at this stage, a reconfigure would be needed to adjust memory/cpu etc.. 
+- vsphere_guest: + vcenter_hostname: vcenter.mydomain.local + username: myuser + password: mypass + guest: newvm001 + from_template: yes + template_src: centosTemplate + cluster: MainCluster + resource_pool: "/Resources" + # Task to gather facts from a vSphere cluster only if the system is a VMWare guest - vsphere_guest: @@ -192,12 +213,14 @@ EXAMPLES = ''' # Typical output of a vsphere_facts run on a guest +# If vmware tools is not installed, ipadresses with return None - hw_eth0: - addresstype: "assigned" label: "Network adapter 1" macaddress: "00:22:33:33:44:55" macaddress_dash: "00-22-33-33-44-55" + ipaddresses: ['192.0.2.100', '2001:DB8:56ff:feac:4d8a'] summary: "VM Network" hw_guest_full_name: "newvm001" hw_guest_id: "rhel6_64Guest" @@ -488,6 +511,49 @@ def vmdisk_id(vm, current_datastore_name): return id_list +def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, module, cluster_name): + vmTemplate = vsphere_client.get_vm_by_name(template_src) + vmTarget = None + + try: + cluster = [k for k, + v in vsphere_client.get_clusters().items() if v == cluster_name][0] + except IndexError, e: + vsphere_client.disconnect() + module.fail_json(msg="Cannot find Cluster named: %s" % + cluster_name) + + try: + rpmor = [k for k, v in vsphere_client.get_resource_pools( + from_mor=cluster).items() + if v == resource_pool][0] + except IndexError, e: + vsphere_client.disconnect() + module.fail_json(msg="Cannot find Resource Pool named: %s" % + resource_pool) + + try: + vmTarget = vsphere_client.get_vm_by_name(guest) + except Exception: + pass + if not vmTemplate.properties.config.template: + module.fail_json( + msg="Target %s is not a registered template" % template_src + ) + try: + if vmTarget: + changed = False + else: + vmTemplate.clone(guest, resourcepool=rpmor) + changed = True + vsphere_client.disconnect() + module.exit_json(changed=changed) + except Exception as e: + module.fail_json( + msg="Could not clone selected machine: %s" % e + ) + + def 
reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool, cluster_name, guest, vm_extra_config, vm_hardware, vm_disk, vm_nic, state, force): spec = None changed = False @@ -618,7 +684,16 @@ def create_vm(vsphere_client, module, esxi, resource_pool, cluster_name, guest, hfmor = dcprops.hostFolder._obj # virtualmachineFolder managed object reference - vmfmor = dcprops.vmFolder._obj + if vm_extra_config['folder']: + if vm_extra_config['folder'] not in vsphere_client._get_managed_objects(MORTypes.Folder).values(): + vsphere_client.disconnect() + module.fail_json(msg="Cannot find folder named: %s" % vm_extra_config['folder']) + + for mor, name in vsphere_client._get_managed_objects(MORTypes.Folder).iteritems(): + if name == vm_extra_config['folder']: + vmfmor = mor + else: + vmfmor = dcprops.vmFolder._obj # networkFolder managed object reference nfmor = dcprops.networkFolder._obj @@ -936,6 +1011,11 @@ def gather_facts(vm): 'hw_processor_count': vm.properties.config.hardware.numCPU, 'hw_memtotal_mb': vm.properties.config.hardware.memoryMB, } + netInfo = vm.get_property('net') + netDict = {} + if netInfo: + for net in netInfo: + netDict[net['mac_address']] = net['ip_addresses'] ifidx = 0 for entry in vm.properties.config.hardware.device: @@ -948,6 +1028,7 @@ def gather_facts(vm): 'addresstype': entry.addressType, 'label': entry.deviceInfo.label, 'macaddress': entry.macAddress, + 'ipaddresses': netDict.get(entry.macAddress, None), 'macaddress_dash': entry.macAddress.replace(':', '-'), 'summary': entry.deviceInfo.summary, } @@ -1066,6 +1147,8 @@ def main(): ], default='present'), vmware_guest_facts=dict(required=False, choices=BOOLEANS), + from_template=dict(required=False, choices=BOOLEANS), + template_src=dict(required=False, type='str'), guest=dict(required=True, type='str'), vm_disk=dict(required=False, type='dict', default={}), vm_nic=dict(required=False, type='dict', default={}), @@ -1080,7 +1163,7 @@ def main(): ), supports_check_mode=False, - 
mutually_exclusive=[['state', 'vmware_guest_facts']], + mutually_exclusive=[['state', 'vmware_guest_facts'],['state', 'from_template']], required_together=[ ['state', 'force'], [ @@ -1090,7 +1173,8 @@ def main(): 'vm_hardware', 'esxi' ], - ['resource_pool', 'cluster'] + ['resource_pool', 'cluster'], + ['from_template', 'resource_pool', 'template_src'] ], ) @@ -1112,6 +1196,8 @@ def main(): esxi = module.params['esxi'] resource_pool = module.params['resource_pool'] cluster = module.params['cluster'] + template_src = module.params['template_src'] + from_template = module.params['from_template'] # CONNECT TO THE SERVER viserver = VIServer() @@ -1135,7 +1221,6 @@ def main(): except Exception, e: module.fail_json( msg="Fact gather failed with exception %s" % e) - # Power Changes elif state in ['powered_on', 'powered_off', 'restarted']: state_result = power_state(vm, state, force) @@ -1183,6 +1268,17 @@ def main(): module.fail_json( msg="No such VM %s. Fact gathering requires an existing vm" % guest) + + elif from_template: + deploy_template( + vsphere_client=viserver, + esxi=esxi, + resource_pool=resource_pool, + guest=guest, + template_src=template_src, + module=module, + cluster_name=cluster + ) if state in ['restarted', 'reconfigured']: module.fail_json( msg="No such VM %s. States [" From 3a40d79cff8332d2cbb0f418d6a450d3b7f70360 Mon Sep 17 00:00:00 2001 From: Chris Church Date: Tue, 30 Sep 2014 11:51:05 -0400 Subject: [PATCH 012/250] Update win_user module to support more user options and group membership changes. 
--- windows/win_user.ps1 | 248 ++++++++++++++++++++++++++++++++++++------- windows/win_user.py | 96 +++++++++++++++-- 2 files changed, 296 insertions(+), 48 deletions(-) diff --git a/windows/win_user.ps1 b/windows/win_user.ps1 index 306d7a0db2f..a805fac7f25 100644 --- a/windows/win_user.ps1 +++ b/windows/win_user.ps1 @@ -20,6 +20,9 @@ # POWERSHELL_COMMON ######## +$ADS_UF_PASSWD_CANT_CHANGE = 64 +$ADS_UF_DONT_EXPIRE_PASSWD = 65536 + $adsi = [ADSI]"WinNT://$env:COMPUTERNAME" function Get-User($user) { @@ -27,22 +30,23 @@ function Get-User($user) { return } -function Create-User([string]$user, [string]$passwd) { - $adsiuser = $adsi.Create("User", $user) - $adsiuser.SetPassword($passwd) - $adsiuser.SetInfo() - $adsiuser - return +function Get-UserFlag($user, $flag) { + If ($user.UserFlags[0] -band $flag) { + $true + } + Else { + $false + } } -function Update-Password($user, [string]$passwd) { - $user.SetPassword($passwd) - $user.SetInfo() +function Set-UserFlag($user, $flag) { + $user.UserFlags = ($user.UserFlags[0] -BOR $flag) } -function Delete-User($user) { - $adsi.delete("user", $user.Name.Value) +function Clear-UserFlag($user, $flag) { + $user.UserFlags = ($user.UserFlags[0] -BXOR $flag) } + ######## $params = Parse-Args $args; @@ -51,56 +55,193 @@ $result = New-Object psobject @{ changed = $false }; -If (-not $params.name.GetType) -{ +If (-not $params.name.GetType) { Fail-Json $result "missing required arguments: name" } +$username = Get-Attr $params "name" +$fullname = Get-Attr $params "fullname" +$description = Get-Attr $params "description" +$password = Get-Attr $params "password" + If ($params.state) { $state = $params.state.ToString().ToLower() - If (($state -ne 'present') -and ($state -ne 'absent')) { - Fail-Json $result "state is '$state'; must be 'present' or 'absent'" + If (($state -ne 'present') -and ($state -ne 'absent') -and ($state -ne 'query')) { + Fail-Json $result "state is '$state'; must be 'present', 'absent' or 'query'" } } -Elseif 
(!$params.state) { +ElseIf (!$params.state) { $state = "present" } -If ((-not $params.password.GetType) -and ($state -eq 'present')) -{ - Fail-Json $result "missing required arguments: password" +If ($params.update_password) { + $update_password = $params.update_password.ToString().ToLower() + If (($update_password -ne 'always') -and ($update_password -ne 'on_create')) { + Fail-Json $result "update_password is '$update_password'; must be 'always' or 'on_create'" + } +} +ElseIf (!$params.update_password) { + $update_password = "always" } -$username = Get-Attr $params "name" -$password = Get-Attr $params "password" +$password_expired = Get-Attr $params "password_expired" $null +If ($password_expired -ne $null) { + $password_expired = $password_expired | ConvertTo-Bool +} + +$password_never_expires = Get-Attr $params "password_never_expires" $null +If ($password_never_expires -ne $null) { + $password_never_expires = $password_never_expires | ConvertTo-Bool +} + +$user_cannot_change_password = Get-Attr $params "user_cannot_change_password" $null +If ($user_cannot_change_password -ne $null) { + $user_cannot_change_password = $user_cannot_change_password | ConvertTo-Bool +} + +$account_disabled = Get-Attr $params "account_disabled" $null +If ($account_disabled -ne $null) { + $account_disabled = $account_disabled | ConvertTo-Bool +} + +$account_locked = Get-Attr $params "account_locked" $null +If ($account_locked -ne $null) { + $account_locked = $account_locked | ConvertTo-Bool + if ($account_locked) { + Fail-Json $result "account_locked must be set to 'no' if provided" + } +} + +$groups = Get-Attr $params "groups" $null +If ($groups -ne $null) { + If ($groups.GetType().Name -eq "String") { + [string[]]$groups = $groups.Split(",") + } + ElseIf ($groups.GetType().Name -ne "Object[]") { + Fail-Json $result "groups must be a string or array" + } + $groups = $groups | ForEach { ([string]$_).Trim() } | Where { $_ } + If ($groups -eq $null) { + $groups = @() + } +} + +If 
($params.groups_action) { + $groups_action = $params.groups_action.ToString().ToLower() + If (($groups_action -ne 'replace') -and ($groups_action -ne 'add') -and ($groups_action -ne 'remove')) { + Fail-Json $result "groups_action is '$groups_action'; must be 'replace', 'add' or 'remove'" + } +} +ElseIf (!$params.groups_action) { + $groups_action = "replace" +} $user_obj = Get-User $username -if ($state -eq 'present') { +If ($state -eq 'present') { # Add or update user try { - if ($user_obj.GetType) { - Update-Password $user_obj $password + If (!$user_obj.GetType) { + $user_obj = $adsi.Create("User", $username) + If ($password -ne $null) { + $user_obj.SetPassword($password) + } + $result.changed = $true + } + ElseIf (($password -ne $null) -and ($update_password -eq 'always')) { + [void][system.reflection.assembly]::LoadWithPartialName('System.DirectoryServices.AccountManagement') + $pc = New-Object -TypeName System.DirectoryServices.AccountManagement.PrincipalContext 'Machine', $env:COMPUTERNAME + # FIXME: ValidateCredentials fails if PasswordExpired == 1 + If (!$pc.ValidateCredentials($username, $password)) { + $user_obj.SetPassword($password) + $result.changed = $true + } + } + If (($fullname -ne $null) -and ($fullname -ne $user_obj.FullName[0])) { + $user_obj.FullName = $fullname + $result.changed = $true + } + If (($description -ne $null) -and ($description -ne $user_obj.Description[0])) { + $user_obj.Description = $description + $result.changed = $true + } + If (($password_expired -ne $null) -and ($password_expired -ne ($user_obj.PasswordExpired | ConvertTo-Bool))) { + $user_obj.PasswordExpired = If ($password_expired) { 1 } Else { 0 } + $result.changed = $true + } + If (($password_never_expires -ne $null) -and ($password_never_expires -ne (Get-UserFlag $user_obj $ADS_UF_DONT_EXPIRE_PASSWD))) { + If ($password_never_expires) { + Set-UserFlag $user_obj $ADS_UF_DONT_EXPIRE_PASSWD + } + Else { + Clear-UserFlag $user_obj $ADS_UF_DONT_EXPIRE_PASSWD + } + 
$result.changed = $true } - else { - Create-User $username $password + If (($user_cannot_change_password -ne $null) -and ($user_cannot_change_password -ne (Get-UserFlag $user_obj $ADS_UF_PASSWD_CANT_CHANGE))) { + If ($user_cannot_change_password) { + Set-UserFlag $user_obj $ADS_UF_PASSWD_CANT_CHANGE + } + Else { + Clear-UserFlag $user_obj $ADS_UF_PASSWD_CANT_CHANGE + } + $result.changed = $true + } + If (($account_disabled -ne $null) -and ($account_disabled -ne $user_obj.AccountDisabled)) { + $user_obj.AccountDisabled = $account_disabled + $result.changed = $true + } + If (($account_locked -ne $null) -and ($account_locked -ne $user_obj.IsAccountLocked)) { + $user_obj.IsAccountLocked = $account_locked + $result.changed = $true + } + If ($groups.GetType) { + [string[]]$current_groups = $user_obj.Groups() | ForEach { $_.GetType().InvokeMember("Name", "GetProperty", $null, $_, $null) } + If (($groups_action -eq "remove") -or ($groups_action -eq "replace")) { + ForEach ($grp in $current_groups) { + If ((($groups_action -eq "remove") -and ($groups -contains $grp)) -or (($groups_action -eq "replace") -and ($groups -notcontains $grp))) { + $group_obj = $adsi.Children | where { $_.SchemaClassName -eq 'Group' -and $_.Name -eq $grp } + If ($group_obj.GetType) { + $group_obj.Remove($user_obj.Path) + $result.changed = $true + } + Else { + Fail-Json $result "group '$grp' not found" + } + } + } + } + If (($groups_action -eq "add") -or ($groups_action -eq "replace")) { + ForEach ($grp in $groups) { + If ($current_groups -notcontains $grp) { + $group_obj = $adsi.Children | where { $_.SchemaClassName -eq 'Group' -and $_.Name -eq $grp } + If ($group_obj.GetType) { + $group_obj.Add($user_obj.Path) + $result.changed = $true + } + Else { + Fail-Json $result "group '$grp' not found" + } + } + } + } + } + If ($result.changed) { + $user_obj.SetInfo() } - $result.changed = $true - $user_obj = Get-User $username } catch { Fail-Json $result $_.Exception.Message } } -else { +ElseIf ($state -eq 
'absent') { # Remove user try { - if ($user_obj.GetType) { - Delete-User $user_obj + If ($user_obj.GetType) { + $username = $user_obj.Name.Value + $adsi.delete("User", $user_obj.Name.Value) $result.changed = $true - } - else { - Set-Attr $result "msg" "User '$username' was not found" + $user_obj = $null } } catch { @@ -108,9 +249,38 @@ else { } } -# Set-Attr $result "user" $user_obj -Set-Attr $result "user_name" $user_obj.Name -Set-Attr $result "user_fullname" $user_obj.FullName -Set-Attr $result "user_path" $user_obj.Path +try { + If ($user_obj.GetType) { + $user_obj.RefreshCache() + Set-Attr $result "name" $user_obj.Name[0] + Set-Attr $result "fullname" $user_obj.FullName[0] + Set-Attr $result "path" $user_obj.Path + Set-Attr $result "description" $user_obj.Description[0] + Set-Attr $result "password_expired" ($user_obj.PasswordExpired | ConvertTo-Bool) + Set-Attr $result "password_never_expires" (Get-UserFlag $user_obj $ADS_UF_DONT_EXPIRE_PASSWD) + Set-Attr $result "user_cannot_change_password" (Get-UserFlag $user_obj $ADS_UF_PASSWD_CANT_CHANGE) + Set-Attr $result "account_disabled" $user_obj.AccountDisabled + Set-Attr $result "account_locked" $user_obj.IsAccountLocked + Set-Attr $result "sid" (New-Object System.Security.Principal.SecurityIdentifier($user_obj.ObjectSid.Value, 0)).Value + $user_groups = @() + ForEach ($grp in $user_obj.Groups()) { + $group_result = New-Object psobject @{ + name = $grp.GetType().InvokeMember("Name", "GetProperty", $null, $grp, $null) + path = $grp.GetType().InvokeMember("ADsPath", "GetProperty", $null, $grp, $null) + } + $user_groups += $group_result; + } + Set-Attr $result "groups" $user_groups + Set-Attr $result "state" "present" + } + Else { + Set-Attr $result "name" $username + Set-Attr $result "msg" "User '$username' was not found" + Set-Attr $result "state" "absent" + } +} +catch { + Fail-Json $result $_.Exception.Message +} -Exit-Json $result; +Exit-Json $result diff --git a/windows/win_user.py b/windows/win_user.py index 
e2da6a1ddb8..6d3620fabbd 100644 --- a/windows/win_user.py +++ b/windows/win_user.py @@ -31,32 +31,109 @@ description: options: name: description: - - Username of the user to manage + - Name of the user to create, remove or modify. required: true + fullname: + description: + - Full name of the user + required: false default: null - aliases: [] + version_added: "1.8" + description: + description: + - Description of the user + required: false + default: null + version_added: "1.8" password: description: - - Password for the user (plain text) - required: true + - Optionally set the user's password to this (plain text) value. + required: false default: null - aliases: [] + update_password: + description: + - C(always) will update passwords if they differ. C(on_create) will + only set the password for newly created users. + required: false + choices: [ 'always', 'on_create' ] + default: always + version_added: "1.8" + password_expired: + description: + - C(yes) will require the user to change their password at next login. + C(no) will clear the expired password flag. + required: false + choices: [ 'yes', 'no' ] + default: null + version_added: "1.8" + password_never_expires: + description: + - C(yes) will set the password to never expire. C(no) will allow the + password to expire. + required: false + choices: [ 'yes', 'no' ] + default: null + version_added: "1.8" + user_cannot_change_password: + description: + - C(yes) will prevent the user from changing their password. C(no) will + allow the user to change their password. + required: false + choices: [ 'yes', 'no' ] + default: null + version_added: "1.8" + account_disabled: + description: + - C(yes) will disable the user account. C(no) will clear the disabled + flag. + required: false + choices: [ 'yes', 'no' ] + default: null + version_added: "1.8" + account_locked: + description: + - C(no) will unlock the user account if locked. 
+ required: false + choices: [ 'no' ] + default: null + version_added: "1.8" + groups: + description: + - Adds or removes the user from this comma-separated lis of groups, + depending on the value of I(groups_action). When I(groups_action) is + C(replace) and I(groups) is set to the empty string ('groups='), the + user is removed from all groups. + required: false + version_added: "1.8" + groups_action: + description: + - If C(replace), the user is added as a member of each group in + I(groups) and removed from any other groups. If C(add), the user is + added to each group in I(groups) where not already a member. If + C(remove), the user is removed from each group in I(groups). + required: false + choices: [ "replace", "add", "remove" ] + default: "replace" + version_added: "1.8" state: description: - - Whether to create or delete a user + - When C(present), creates or updates the user account. When C(absent), + removes the user account if it exists. When C(query) (new in 1.8), + retrieves the user account details without making any changes. 
required: false choices: - present - absent + - query default: present aliases: [] -author: Paul Durivage +author: Paul Durivage / Chris Church ''' EXAMPLES = ''' # Ad-hoc example -$ ansible -i hosts -m win_user -a "name=bob password=Password12345" all -$ ansible -i hosts -m win_user -a "name=bob password=Password12345 state=absent" all +$ ansible -i hosts -m win_user -a "name=bob password=Password12345 groups=Users" all +$ ansible -i hosts -m win_user -a "name=bob state=absent" all # Playbook example --- @@ -68,4 +145,5 @@ $ ansible -i hosts -m win_user -a "name=bob password=Password12345 state=absent" win_user: name: ansible password: "@ns1bl3" + groups: ["Users"] ''' From c9f8dcff99bcbe38718d8e660222d9f58eba7c39 Mon Sep 17 00:00:00 2001 From: Chris Church Date: Tue, 30 Sep 2014 16:41:34 -0400 Subject: [PATCH 013/250] Update win_feature module to work on server 2008 and fail with error message on non-server windows, fix output messages and error handling. --- windows/win_feature.ps1 | 70 +++++++++++++++++++++++++++-------------- 1 file changed, 47 insertions(+), 23 deletions(-) diff --git a/windows/win_feature.ps1 b/windows/win_feature.ps1 index a0776a4bf1a..a54007b47bf 100644 --- a/windows/win_feature.ps1 +++ b/windows/win_feature.ps1 @@ -23,7 +23,7 @@ Import-Module Servermanager; $params = Parse-Args $args; -$result = New-Object psobject @{ +$result = New-Object PSObject -Property @{ changed = $false } @@ -70,19 +70,33 @@ Else $includemanagementtools = $false } - - If ($state -eq "present") { try { - $featureresult = Add-WindowsFeature -Name $name -Restart:$restart -IncludeAllSubFeature:$includesubfeatures -IncludeManagementTools:$includemanagementtools + If (Get-Command "Install-WindowsFeature" -ErrorAction SilentlyContinue) { + $featureresult = Install-WindowsFeature -Name $name -Restart:$restart -IncludeAllSubFeature:$includesubfeatures -IncludeManagementTools:$includemanagementtools -ErrorAction SilentlyContinue + } + ElseIf (Get-Command 
"Add-WindowsFeature" -ErrorAction SilentlyContinue) { + $featureresult = Add-WindowsFeature -Name $name -Restart:$restart -IncludeAllSubFeature:$includesubfeatures -ErrorAction SilentlyContinue + } + Else { + Fail-Json $result "Not supported on this version of Windows" + } } catch { Fail-Json $result $_.Exception.Message } } -Elseif ($state -eq "absent") { +ElseIf ($state -eq "absent") { try { - $featureresult = Remove-WindowsFeature -Name $name -Restart:$restart + If (Get-Command "Uninstall-WindowsFeature" -ErrorAction SilentlyContinue) { + $featureresult = Uninstall-WindowsFeature -Name $name -Restart:$restart -ErrorAction SilentlyContinue + } + ElseIf (Get-Command "Remove-WindowsFeature" -ErrorAction SilentlyContinue) { + $featureresult = Remove-WindowsFeature -Name $name -Restart:$restart -ErrorAction SilentlyContinue + } + Else { + Fail-Json $result "Not supported on this version of Windows" + } } catch { Fail-Json $result $_.Exception.Message @@ -93,30 +107,40 @@ Elseif ($state -eq "absent") { # each role/feature that is installed/removed $installed_features = @() #$featureresult.featureresult is filled if anything was changed -if ($featureresult.FeatureResult) +If ($featureresult.FeatureResult) { ForEach ($item in $featureresult.FeatureResult) { - $installed_features += New-Object psobject @{ - id = $item.id.ToString() + $message = @() + ForEach ($msg in $item.Message) { + $message += New-Object PSObject -Property @{ + message_type = $msg.MessageType.ToString() + error_code = $msg.ErrorCode + text = $msg.Text + } + } + $installed_features += New-Object PSObject -Property @{ + id = $item.Id display_name = $item.DisplayName - message = $item.Message.ToString() - restart_needed = $item.RestartNeeded.ToString() + message = $message + restart_needed = $item.RestartNeeded.ToString() | ConvertTo-Bool skip_reason = $item.SkipReason.ToString() - success = $item.Success.ToString() + success = $item.Success.ToString() | ConvertTo-Bool } } - Set-Attr $result 
"feature_result" $installed_features - - $result.changed = $true } -Else -{ - Set-Attr $result "feature_result" $null -} -Set-Attr $result "feature_success" $featureresult.Success.ToString() -Set-Attr $result "feature_exitcode" $featureresult.ExitCode.ToString() -Set-Attr $result "feature_restart_needed" $featureresult.RestartNeeded.ToString() +Set-Attr $result "feature_result" $installed_features +Set-Attr $result "success" ($featureresult.Success.ToString() | ConvertTo-Bool) +Set-Attr $result "exitcode" $featureresult.ExitCode.ToString() +Set-Attr $result "restart_needed" ($featureresult.RestartNeeded.ToString() | ConvertTo-Bool) -Exit-Json $result; +If ($result.success) { + Exit-Json $result +} +ElseIf ($state -eq "present") { + Fail-Json $result "Failed to add feature" +} +Else { + Fail-Json $result "Failed to remove feature" +} From 1e9680aa6bbe8c70a24c430813118821d0eab70f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A9r=C3=A9mie=20Astori?= Date: Tue, 30 Sep 2014 23:04:54 -0400 Subject: [PATCH 014/250] Fix #91: Expand user home folder for the key_file path of the git module --- source_control/git.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/source_control/git.py b/source_control/git.py index a5d94e3dbbe..47dab35812b 100644 --- a/source_control/git.py +++ b/source_control/git.py @@ -518,6 +518,10 @@ def main(): else: gitconfig = os.path.join(dest, '.git', 'config') + # make sure the key_file path is expanded for ~ and $HOME + if key_file is not None: + key_file = os.path.abspath(os.path.expanduser(key_file)) + # create a wrapper script and export # GIT_SSH= as an environment variable # for git to use the wrapper script From 204a0dc1313f7b64ce8e279cfd5f31f7b22a3658 Mon Sep 17 00:00:00 2001 From: Nate Coraor Date: Wed, 1 Oct 2014 00:31:33 -0400 Subject: [PATCH 015/250] Fix a few bugs and misbehavior in the hg module: 1. Don't pull when `dest` is already at the desired changeset. 2. 
Don't change the working copy when cleaning or pulling and a revision was specified. 3. Change the default for the `revision` param to match the behavior of hg. --- source_control/hg.py | 40 +++++++++++++++++++++++++++++++++------- 1 file changed, 33 insertions(+), 7 deletions(-) diff --git a/source_control/hg.py b/source_control/hg.py index 1b95bcd5ac3..c2bd0d9d953 100644 --- a/source_control/hg.py +++ b/source_control/hg.py @@ -2,6 +2,7 @@ #-*- coding: utf-8 -*- # (c) 2013, Yeukhon Wong +# (c) 2014, Nate Coraor # # This module was originally inspired by Brad Olson's ansible-module-mercurial # . This module tends @@ -49,7 +50,7 @@ options: - Equivalent C(-r) option in hg command which could be the changeset, revision number, branch name or even tag. required: false - default: "default" + default: null aliases: [ version ] force: description: @@ -128,7 +129,10 @@ class Hg(object): if not before: return False - (rc, out, err) = self._command(['update', '-C', '-R', self.dest]) + args = ['update', '-C', '-R', self.dest] + if self.revision is not None: + args = args + ['-r', self.revision] + (rc, out, err) = self._command(args) if rc != 0: self.module.fail_json(msg=err) @@ -170,13 +174,30 @@ class Hg(object): ['pull', '-R', self.dest, self.repo]) def update(self): + if self.revision is not None: + return self._command(['update', '-r', self.revision, '-R', self.dest]) return self._command(['update', '-R', self.dest]) def clone(self): - return self._command(['clone', self.repo, self.dest, '-r', self.revision]) + if self.revision is not None: + return self._command(['clone', self.repo, self.dest, '-r', self.revision]) + return self._command(['clone', self.repo, self.dest]) - def switch_version(self): - return self._command(['update', '-r', self.revision, '-R', self.dest]) + @property + def at_revision(self): + """ + There is no point in pulling from a potentially down/slow remote site + if the desired changeset is already the current changeset. 
+ """ + if self.revision is None or len(self.revision) < 7: + # Assume it's a rev number, tag, or branch + return False + (rc, out, err) = self._command(['--debug', 'id', '-i', '-R', self.dest]) + if rc != 0: + self.module.fail_json(msg=err) + if out.startswith(self.revision): + return True + return False # =========================================== @@ -185,7 +206,7 @@ def main(): argument_spec = dict( repo = dict(required=True, aliases=['name']), dest = dict(required=True), - revision = dict(default="default", aliases=['version']), + revision = dict(default=None, aliases=['version']), force = dict(default='yes', type='bool'), purge = dict(default='no', type='bool'), executable = dict(default=None), @@ -212,6 +233,12 @@ def main(): (rc, out, err) = hg.clone() if rc != 0: module.fail_json(msg=err) + elif hg.at_revision: + # no update needed, don't pull + before = hg.get_revision() + + # but force and purge if desired + cleaned = hg.cleanup(force, purge) else: # get the current state before doing pulling before = hg.get_revision() @@ -227,7 +254,6 @@ def main(): if rc != 0: module.fail_json(msg=err) - hg.switch_version() after = hg.get_revision() if before != after or cleaned: changed = True From 0a399fd97193f994071ac1fac7a040ccd1caee16 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Tue, 12 Aug 2014 13:47:38 -0500 Subject: [PATCH 016/250] Support config_drive and user_data in rax_scaling_group --- cloud/rax_scaling_group.py | 46 +++++++++++++++++++++++++++++++++----- 1 file changed, 40 insertions(+), 6 deletions(-) diff --git a/cloud/rax_scaling_group.py b/cloud/rax_scaling_group.py index d884d3c1303..16be9ac4c0f 100644 --- a/cloud/rax_scaling_group.py +++ b/cloud/rax_scaling_group.py @@ -24,6 +24,14 @@ description: - Manipulate Rackspace Cloud Autoscale Groups version_added: 1.7 options: + config_drive: + description: + - Attach read-only configuration drive to server as label config-2 + default: no + choices: + - "yes" + - "no" + version_added: 1.8 cooldown: 
description: - The period of time, in seconds, that must pass before any scaling can @@ -92,6 +100,11 @@ options: - present - absent default: present + user_data: + description: + - Data to be uploaded to the servers config drive. This option implies + I(config_drive). Can be a file path or a string + version_added: 1.8 author: Matt Martz extends_documentation_fragment: rackspace ''' @@ -128,17 +141,27 @@ except ImportError: def rax_asg(module, cooldown=300, disk_config=None, files={}, flavor=None, image=None, key_name=None, loadbalancers=[], meta={}, min_entities=0, max_entities=0, name=None, networks=[], - server_name=None, state='present'): + server_name=None, state='present', user_data=None, + config_drive=False): changed = False au = pyrax.autoscale - cnw = pyrax.cloud_networks - cs = pyrax.cloudservers - if not au or not cnw or not cs: + if not au: module.fail_json(msg='Failed to instantiate clients. This ' 'typically indicates an invalid region or an ' 'incorrectly capitalized region name.') + if user_data: + config_drive = True + + if user_data and os.path.isfile(user_data): + try: + f = open(user_data) + user_data = f.read() + f.close() + except Exception, e: + module.fail_json(msg='Failed to load %s' % user_data) + if state == 'present': # Normalize and ensure all metadata values are strings if meta: @@ -204,7 +227,8 @@ def rax_asg(module, cooldown=300, disk_config=None, files={}, flavor=None, flavor=flavor, disk_config=disk_config, metadata=meta, personality=files, networks=nics, load_balancers=lbs, - key_name=key_name) + key_name=key_name, config_drive=config_drive, + user_data=user_data) changed = True except Exception, e: module.fail_json(msg='%s' % e.message) @@ -256,6 +280,12 @@ def rax_asg(module, cooldown=300, disk_config=None, files={}, flavor=None, if key_name != lc.get('key_name'): lc_args['key_name'] = key_name + if config_drive != lc.get('config_drive'): + lc_args['config_drive'] = config_drive + + if user_data != lc.get('user_data'): + 
lc_args['user_data'] = user_data + if lc_args: # Work around for https://github.com/rackspace/pyrax/pull/389 if 'flavor' not in lc_args: @@ -284,6 +314,7 @@ def main(): argument_spec = rax_argument_spec() argument_spec.update( dict( + config_drive=dict(default=False, type='bool'), cooldown=dict(type='int', default=300), disk_config=dict(choices=['auto', 'manual']), files=dict(type='list', default=[]), @@ -298,6 +329,7 @@ def main(): networks=dict(type='list', default=['public', 'private']), server_name=dict(required=True), state=dict(default='present', choices=['present', 'absent']), + user_data=dict(no_log=True), ) ) @@ -309,6 +341,7 @@ def main(): if not HAS_PYRAX: module.fail_json(msg='pyrax is required for this module') + config_drive = module.params.get('config_drive') cooldown = module.params.get('cooldown') disk_config = module.params.get('disk_config') if disk_config: @@ -325,6 +358,7 @@ def main(): networks = module.params.get('networks') server_name = module.params.get('server_name') state = module.params.get('state') + user_data = module.params.get('user_data') if not 0 <= min_entities <= 1000 or not 0 <= max_entities <= 1000: module.fail_json(msg='min_entities and max_entities must be an ' @@ -340,7 +374,7 @@ def main(): key_name=key_name, loadbalancers=loadbalancers, min_entities=min_entities, max_entities=max_entities, name=name, networks=networks, server_name=server_name, - state=state) + state=state, config_drive=config_drive, user_data=user_data) # import module snippets From 924bff94817302a003e7bec64594fcb4b7c6bb3c Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Tue, 2 Sep 2014 14:46:31 -0500 Subject: [PATCH 017/250] Ensure that files is the right type, and that we send the formatted personality var --- cloud/rax_scaling_group.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/rax_scaling_group.py b/cloud/rax_scaling_group.py index 16be9ac4c0f..3b7fa8f4aca 100644 --- a/cloud/rax_scaling_group.py +++ 
b/cloud/rax_scaling_group.py @@ -225,7 +225,7 @@ def rax_asg(module, cooldown=300, disk_config=None, files={}, flavor=None, launch_config_type='launch_server', server_name=server_name, image=image, flavor=flavor, disk_config=disk_config, - metadata=meta, personality=files, + metadata=meta, personality=personality, networks=nics, load_balancers=lbs, key_name=key_name, config_drive=config_drive, user_data=user_data) @@ -317,7 +317,7 @@ def main(): config_drive=dict(default=False, type='bool'), cooldown=dict(type='int', default=300), disk_config=dict(choices=['auto', 'manual']), - files=dict(type='list', default=[]), + files=dict(type='dict', default={}), flavor=dict(required=True), image=dict(required=True), key_name=dict(), From fbadfd590298097545cef147bfac55a70a019c5e Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Tue, 2 Sep 2014 14:47:44 -0500 Subject: [PATCH 018/250] Fix determination if a few configurations change in the launch config --- cloud/rax_scaling_group.py | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/cloud/rax_scaling_group.py b/cloud/rax_scaling_group.py index 3b7fa8f4aca..dcd8dbbfeaa 100644 --- a/cloud/rax_scaling_group.py +++ b/cloud/rax_scaling_group.py @@ -131,6 +131,8 @@ EXAMPLES = ''' register: asg ''' +import base64 + try: import pyrax HAS_PYRAX = True @@ -261,14 +263,23 @@ def rax_asg(module, cooldown=300, disk_config=None, files={}, flavor=None, if flavor != lc.get('flavor'): lc_args['flavor'] = flavor - if disk_config != lc.get('disk_config'): + disk_config = disk_config or 'AUTO' + if ((disk_config or lc.get('disk_config')) and + disk_config != lc.get('disk_config')): lc_args['disk_config'] = disk_config - if meta != lc.get('metadata'): + if (meta or lc.get('meta')) and meta != lc.get('metadata'): lc_args['metadata'] = meta - if files != lc.get('personality'): - lc_args['personality'] = files + test_personality = [] + for p in personality: + test_personality.append({ + 'path': p['path'], + 
'contents': base64.b64encode(p['contents']) + }) + if ((test_personality or lc.get('personality')) and + test_personality != lc.get('personality')): + lc_args['personality'] = personality if nics != lc.get('networks'): lc_args['networks'] = nics @@ -283,7 +294,7 @@ def rax_asg(module, cooldown=300, disk_config=None, files={}, flavor=None, if config_drive != lc.get('config_drive'): lc_args['config_drive'] = config_drive - if user_data != lc.get('user_data'): + if base64.b64encode(user_data) != lc.get('user_data'): lc_args['user_data'] = user_data if lc_args: From 5dcc0ff0d9bf80c9f21331d7ff385b86845dbd87 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Thu, 25 Sep 2014 08:55:35 -0500 Subject: [PATCH 019/250] Don't count DELETED servers when adding new servers --- cloud/rax.py | 187 +++++++++++++++++++++++++++++++++------------------ 1 file changed, 122 insertions(+), 65 deletions(-) diff --git a/cloud/rax.py b/cloud/rax.py index e01367ed5bd..5fa1b57386a 100644 --- a/cloud/rax.py +++ b/cloud/rax.py @@ -64,7 +64,10 @@ options: exact_count: description: - Explicitly ensure an exact count of instances, used with - state=active/present + state=active/present. If specified as C(yes) and I(count) is less than + the servers matched, servers will be deleted to match the count. If + the number of matched servers is fewer than specified in I(count) + additional servers will be added. default: no choices: - "yes" @@ -150,6 +153,12 @@ options: - how long before wait gives up, in seconds default: 300 author: Jesse Keating, Matt Martz +notes: + - I(exact_count) can be "destructive" if the number of running servers in + the I(group) is larger than that specified in I(count). In such a case, the + I(state) is effectively set to C(absent) and the extra servers are deleted. + In the case of deletion, the returned data structure will have C(action) + set to C(delete), and the oldest servers in the group will be deleted. 
extends_documentation_fragment: rackspace.openstack ''' @@ -441,79 +450,102 @@ def cloudservers(module, state=None, name=None, flavor=None, image=None, if group is None: module.fail_json(msg='"group" must be provided when using ' '"exact_count"') - else: - if auto_increment: - numbers = set() - try: - name % 0 - except TypeError, e: - if e.message.startswith('not all'): - name = '%s%%d' % name - else: - module.fail_json(msg=e.message) + if auto_increment: + numbers = set() - pattern = re.sub(r'%\d*[sd]', r'(\d+)', name) - for server in cs.servers.list(): - if server.metadata.get('group') == group: - servers.append(server) - match = re.search(pattern, server.name) - if match: - number = int(match.group(1)) - numbers.add(number) - - number_range = xrange(count_offset, count_offset + count) - available_numbers = list(set(number_range) - .difference(numbers)) - else: - for server in cs.servers.list(): - if server.metadata.get('group') == group: - servers.append(server) - - # If state was absent but the count was changed, - # assume we only wanted to remove that number of instances - if was_absent: - diff = len(servers) - count - if diff < 0: - count = 0 + # See if the name is a printf like string, if not append + # %d to the end + try: + name % 0 + except TypeError, e: + if e.message.startswith('not all'): + name = '%s%%d' % name else: - count = diff + module.fail_json(msg=e.message) - if len(servers) > count: - state = 'absent' - kept = servers[:count] - del servers[:count] - instance_ids = [] - for server in servers: - instance_ids.append(server.id) - delete(module, instance_ids=instance_ids, wait=wait, - wait_timeout=wait_timeout, kept=kept) - elif len(servers) < count: - if auto_increment: - names = [] - name_slice = count - len(servers) - numbers_to_use = available_numbers[:name_slice] - for number in numbers_to_use: - names.append(name % number) - else: - names = [name] * (count - len(servers)) + # regex pattern to match printf formatting + pattern = 
re.sub(r'%\d*[sd]', r'(\d+)', name) + for server in cs.servers.list(): + # Ignore DELETED servers + if server.status == 'DELETED': + continue + if server.metadata.get('group') == group: + servers.append(server) + match = re.search(pattern, server.name) + if match: + number = int(match.group(1)) + numbers.add(number) + + number_range = xrange(count_offset, count_offset + count) + available_numbers = list(set(number_range) + .difference(numbers)) + else: # Not auto incrementing + for server in cs.servers.list(): + # Ignore DELETED servers + if server.status == 'DELETED': + continue + if server.metadata.get('group') == group: + servers.append(server) + # available_numbers not needed here, we inspect auto_increment + # again later + + # If state was absent but the count was changed, + # assume we only wanted to remove that number of instances + if was_absent: + diff = len(servers) - count + if diff < 0: + count = 0 else: - instances = [] - instance_ids = [] - for server in servers: - instances.append(rax_to_dict(server, 'server')) - instance_ids.append(server.id) - module.exit_json(changed=False, action=None, - instances=instances, - success=[], error=[], timeout=[], - instance_ids={'instances': instance_ids, - 'success': [], 'error': [], - 'timeout': []}) - else: + count = diff + + if len(servers) > count: + # We have more servers than we need, set state='absent' + # and delete the extras, this should delete the oldest + state = 'absent' + kept = servers[:count] + del servers[:count] + instance_ids = [] + for server in servers: + instance_ids.append(server.id) + delete(module, instance_ids=instance_ids, wait=wait, + wait_timeout=wait_timeout, kept=kept) + elif len(servers) < count: + # we have fewer servers than we need + if auto_increment: + # auto incrementing server numbers + names = [] + name_slice = count - len(servers) + numbers_to_use = available_numbers[:name_slice] + for number in numbers_to_use: + names.append(name % number) + else: + # We are not auto 
incrementing server numbers, + # create a list of 'name' that matches how many we need + names = [name] * (count - len(servers)) + else: + # we have the right number of servers, just return info + # about all of the matched servers + instances = [] + instance_ids = [] + for server in servers: + instances.append(rax_to_dict(server, 'server')) + instance_ids.append(server.id) + module.exit_json(changed=False, action=None, + instances=instances, + success=[], error=[], timeout=[], + instance_ids={'instances': instance_ids, + 'success': [], 'error': [], + 'timeout': []}) + else: # not called with exact_count=True if group is not None: if auto_increment: + # we are auto incrementing server numbers, but not with + # exact_count numbers = set() + # See if the name is a printf like string, if not append + # %d to the end try: name % 0 except TypeError, e: @@ -522,8 +554,12 @@ def cloudservers(module, state=None, name=None, flavor=None, image=None, else: module.fail_json(msg=e.message) + # regex pattern to match printf formatting pattern = re.sub(r'%\d*[sd]', r'(\d+)', name) for server in cs.servers.list(): + # Ignore DELETED servers + if server.status == 'DELETED': + continue if server.metadata.get('group') == group: servers.append(server) match = re.search(pattern, server.name) @@ -540,8 +576,11 @@ def cloudservers(module, state=None, name=None, flavor=None, image=None, for number in numbers_to_use: names.append(name % number) else: + # Not auto incrementing names = [name] * count else: + # No group was specified, and not using exact_count + # Perform more simplistic matching search_opts = { 'name': '^%s$' % name, 'image': image, @@ -549,11 +588,18 @@ def cloudservers(module, state=None, name=None, flavor=None, image=None, } servers = [] for server in cs.servers.list(search_opts=search_opts): + # Ignore DELETED servers + if server.status == 'DELETED': + continue + # Ignore servers with non matching metadata if server.metadata != meta: continue servers.append(server) if 
len(servers) >= count: + # We have more servers than were requested, don't do + # anything. Not running with exact_count=True, so we assume + # more is OK instances = [] for server in servers: instances.append(rax_to_dict(server, 'server')) @@ -566,6 +612,8 @@ def cloudservers(module, state=None, name=None, flavor=None, image=None, 'success': [], 'error': [], 'timeout': []}) + # We need more servers to reach out target, create names for + # them, we aren't performing auto_increment here names = [name] * (count - len(servers)) create(module, names=names, flavor=flavor, image=image, @@ -577,6 +625,8 @@ def cloudservers(module, state=None, name=None, flavor=None, image=None, elif state == 'absent': if instance_ids is None: + # We weren't given an explicit list of server IDs to delete + # Let's match instead for arg, value in dict(name=name, flavor=flavor, image=image).iteritems(): if not value: @@ -588,10 +638,15 @@ def cloudservers(module, state=None, name=None, flavor=None, image=None, 'flavor': flavor } for server in cs.servers.list(search_opts=search_opts): + # Ignore DELETED servers + if server.status == 'DELETED': + continue + # Ignore servers with non matching metadata if meta != server.metadata: continue servers.append(server) + # Build a list of server IDs to delete instance_ids = [] for server in servers: if len(instance_ids) < count: @@ -600,6 +655,8 @@ def cloudservers(module, state=None, name=None, flavor=None, image=None, break if not instance_ids: + # No server IDs were matched for deletion, or no IDs were + # explicitly provided, just exit and don't do anything module.exit_json(changed=False, action=None, instances=[], success=[], error=[], timeout=[], instance_ids={'instances': [], From 661b452fe31d8b2fdc6683334ce646eff2efb3d0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tadej=20Jane=C5=BE?= Date: Thu, 2 Oct 2014 18:17:27 +0200 Subject: [PATCH 020/250] Simplified condition in a stat module example testing if path is a directory. 
--- files/stat.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/files/stat.py b/files/stat.py index 8c717a395c4..fe8096516b7 100644 --- a/files/stat.py +++ b/files/stat.py @@ -51,12 +51,12 @@ EXAMPLES = ''' - fail: msg="Whoops! file ownership has changed" when: st.stat.pw_name != 'root' -# Determine if a path exists and is a directory. Note we need to test +# Determine if a path exists and is a directory. Note that we need to test # both that p.stat.isdir actually exists, and also that it's set to true. - stat: path=/path/to/something register: p - debug: msg="Path exists and is a directory" - when: p.stat.isdir is defined and p.stat.isdir == true + when: p.stat.isdir is defined and p.stat.isdir # Don't do md5 checksum - stat: path=/path/to/myhugefile get_md5=no From 9e0565e58f02cbea8731fe2acd0484d92cd601ae Mon Sep 17 00:00:00 2001 From: Jon Hadfield Date: Fri, 3 Oct 2014 13:00:11 +0100 Subject: [PATCH 021/250] Remove redundant check for creates argument. --- files/unarchive.py | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/files/unarchive.py b/files/unarchive.py index b632aff3f04..f46e52e02a3 100644 --- a/files/unarchive.py +++ b/files/unarchive.py @@ -193,7 +193,6 @@ def main(): src = os.path.expanduser(module.params['src']) dest = os.path.expanduser(module.params['dest']) copy = module.params['copy'] - creates = module.params['creates'] # did tar file arrive? if not os.path.exists(src): @@ -204,20 +203,6 @@ def main(): if not os.access(src, os.R_OK): module.fail_json(msg="Source '%s' not readable" % src) - if creates: - # do not run the command if the line contains creates=filename - # and the filename already exists. This allows idempotence - # of command executions. - v = os.path.expanduser(creates) - if os.path.exists(v): - module.exit_json( - stdout="skipped, since %s exists" % v, - skipped=True, - changed=False, - stderr=False, - rc=0 - ) - # is dest OK to receive tar file? 
if not os.path.isdir(dest): module.fail_json(msg="Destination '%s' is not a directory" % dest) From 3ab26c538bc6a550aeb8a430913cf92741ad6efa Mon Sep 17 00:00:00 2001 From: Lorin Hochstein Date: Fri, 3 Oct 2014 15:10:45 -0400 Subject: [PATCH 022/250] apt_key: add keyserver example to docs --- packaging/apt_key.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/packaging/apt_key.py b/packaging/apt_key.py index 0a483a97bbc..51901e76e6b 100644 --- a/packaging/apt_key.py +++ b/packaging/apt_key.py @@ -81,6 +81,9 @@ options: ''' EXAMPLES = ''' +# Add an apt key by id from a keyserver +- apt_key: keyserver=keyserver.ubuntu.com id=36A1D7869245C8950F966E92D8576A8BA88D21E9 + # Add an Apt signing key, uses whichever key is at the URL - apt_key: url=https://ftp-master.debian.org/keys/archive-key-6.0.asc state=present From 36b72873758861bd287aebefdfc4ef8df2947025 Mon Sep 17 00:00:00 2001 From: Antti Salminen Date: Fri, 9 May 2014 09:53:11 +0300 Subject: [PATCH 023/250] Find the actual commit annotated tags refer to instead of the tag object. --- source_control/git.py | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/source_control/git.py b/source_control/git.py index a5d94e3dbbe..c9a3176b300 100644 --- a/source_control/git.py +++ b/source_control/git.py @@ -287,6 +287,7 @@ def reset(git_path, module, dest): def get_remote_head(git_path, module, dest, version, remote, bare): cloning = False cwd = None + tag = False if remote == module.params['repo']: cloning = True else: @@ -301,7 +302,8 @@ def get_remote_head(git_path, module, dest, version, remote, bare): elif is_remote_branch(git_path, module, dest, remote, version): cmd = '%s ls-remote %s -h refs/heads/%s' % (git_path, remote, version) elif is_remote_tag(git_path, module, dest, remote, version): - cmd = '%s ls-remote %s -t refs/tags/%s' % (git_path, remote, version) + tag = True + cmd = '%s ls-remote %s -t refs/tags/%s*' % (git_path, remote, version) else: # appears to be a sha1. 
return as-is since it appears # cannot check for a specific sha1 on remote @@ -309,6 +311,16 @@ def get_remote_head(git_path, module, dest, version, remote, bare): (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=cwd) if len(out) < 1: module.fail_json(msg="Could not determine remote revision for %s" % version) + + if tag: + # Find the dereferenced tag if this is an annotated tag. + for tag in out.split('\n'): + if tag.endswith(version + '^{}'): + out = tag + break + elif tag.endswith(version): + out = tag + rev = out.split()[0] return rev From 5af8d55b0365a5c3278c43b5424bf5f2ddf897b8 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 8 Oct 2014 14:44:42 -0500 Subject: [PATCH 024/250] Strip newlines off k=v pairs in command/shell argument parsing Fixes ansible/ansible#9272 --- commands/command.py | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/commands/command.py b/commands/command.py index c1fabd4f9b4..c584d6feed8 100644 --- a/commands/command.py +++ b/commands/command.py @@ -250,12 +250,7 @@ class CommandModule(AnsibleModule): if '=' in x and not quoted: # check to see if this is a special parameter for the command k, v = x.split('=', 1) - v = unquote(v) - # because we're not breaking out quotes in the shlex split - # above, the value of the k=v pair may still be quoted. If - # so, remove them. 
- if len(v) > 1 and (v.startswith('"') and v.endswith('"') or v.startswith("'") and v.endswith("'")): - v = v[1:-1] + v = unquote(v.strip()) if k in ('creates', 'removes', 'chdir', 'executable', 'NO_LOG'): if k == "chdir": v = os.path.abspath(os.path.expanduser(v)) From 8d9f6053d3c4f8c15bf2d1fdff79efe3f6637255 Mon Sep 17 00:00:00 2001 From: Ricky Cook Date: Wed, 8 Oct 2014 22:19:26 +1100 Subject: [PATCH 025/250] Simplify command module option parsing --- commands/command.py | 33 +++++++++++++++++++++------------ 1 file changed, 21 insertions(+), 12 deletions(-) diff --git a/commands/command.py b/commands/command.py index c584d6feed8..90a94fd8369 100644 --- a/commands/command.py +++ b/commands/command.py @@ -18,6 +18,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +import copy import sys import datetime import traceback @@ -99,12 +100,21 @@ EXAMPLES = ''' creates: /path/to/database ''' +OPTIONS = {'chdir': None, + 'creates': None, + 'executable': None, + 'NO_LOG': None, + 'removes': None, + 'warn': True, + } + # This is a pretty complex regex, which functions as follows: # # 1. (^|\s) # ^ look for a space or the beginning of the line -# 2. (creates|removes|chdir|executable|NO_LOG)= -# ^ look for a valid param, followed by an '=' +# 2. ({options_list})= +# ^ expanded to (chdir|creates|executable...)= +# look for a valid param, followed by an '=' # 3. (?P[\'"])? # ^ look for an optional quote character, which can either be # a single or double quote character, and store it for later @@ -114,8 +124,12 @@ EXAMPLES = ''' # ^ a non-escaped space or a non-escaped quote of the same kind # that was matched in the first 'quote' is found, or the end of # the line is reached - -PARAM_REGEX = re.compile(r'(^|\s)(creates|removes|chdir|executable|NO_LOG|warn)=(?P[\'"])?(.*?)(?(quote)(?[\'"])?(.*?)(?(quote)(? 
Date: Wed, 8 Oct 2014 22:25:02 +1100 Subject: [PATCH 026/250] Add comment to command options dict --- commands/command.py | 1 + 1 file changed, 1 insertion(+) diff --git a/commands/command.py b/commands/command.py index 90a94fd8369..75927a5ba0b 100644 --- a/commands/command.py +++ b/commands/command.py @@ -100,6 +100,7 @@ EXAMPLES = ''' creates: /path/to/database ''' +# Dict of options and their defaults OPTIONS = {'chdir': None, 'creates': None, 'executable': None, From b195b5a6bb65acfbfddc61885df1fe9d721c34a3 Mon Sep 17 00:00:00 2001 From: Ricky Cook Date: Wed, 8 Oct 2014 22:30:20 +1100 Subject: [PATCH 027/250] Get warn option same as other args --- commands/command.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/command.py b/commands/command.py index 75927a5ba0b..44c95a3d5bd 100644 --- a/commands/command.py +++ b/commands/command.py @@ -163,7 +163,7 @@ def main(): args = module.params['args'] creates = module.params['creates'] removes = module.params['removes'] - warn = module.params.get('warn', True) + warn = module.params['warn'] if args.strip() == '': module.fail_json(rc=256, msg="no command given") From 6db328c79a8c1f406fdab4e901732ecc9682ced3 Mon Sep 17 00:00:00 2001 From: Ricky Cook Date: Wed, 8 Oct 2014 22:59:03 +1100 Subject: [PATCH 028/250] Fix regex string format --- commands/command.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/command.py b/commands/command.py index 44c95a3d5bd..2b79b327d71 100644 --- a/commands/command.py +++ b/commands/command.py @@ -127,7 +127,7 @@ OPTIONS = {'chdir': None, # the line is reached OPTIONS_REGEX = '|'.join(OPTIONS.keys()) PARAM_REGEX = re.compile( - r'(^|\s)({options_list})=(?P[\'"])?(.*?)(?(quote)(?[\'"])?(.*?)(?(quote)(? 
Date: Sat, 11 Oct 2014 21:11:30 -0600 Subject: [PATCH 029/250] fix spelling --- files/synchronize.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/synchronize.py b/files/synchronize.py index 842dd863849..74a0e311411 100644 --- a/files/synchronize.py +++ b/files/synchronize.py @@ -39,7 +39,7 @@ options: version_added: "1.5" mode: description: - - Specify the direction of the synchroniztion. In push mode the localhost or delegate is the source; In pull mode the remote host in context is the source. + - Specify the direction of the synchronization. In push mode the localhost or delegate is the source; In pull mode the remote host in context is the source. required: false choices: [ 'push', 'pull' ] default: 'push' From 882109190928a9dc0cc9a84d69cf8bfb506e2b76 Mon Sep 17 00:00:00 2001 From: Sapan Bhatia Date: Tue, 14 Oct 2014 02:24:58 -0400 Subject: [PATCH 030/250] Added user, tenant and password to module parameters declaration -- these parameters are already handled by the implementation --- cloud/keystone_user.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cloud/keystone_user.py b/cloud/keystone_user.py index 5b412ca8008..4af254bfe6d 100644 --- a/cloud/keystone_user.py +++ b/cloud/keystone_user.py @@ -291,6 +291,9 @@ def main(): argument_spec.update(dict( tenant_description=dict(required=False), email=dict(required=False), + user=dict(required=False), + tenant=dict(required=False), + password=dict(required=False), role=dict(required=False), state=dict(default='present', choices=['present', 'absent']), endpoint=dict(required=False, From 9c5cdd6daf478879389660d59dbeafddd80f750a Mon Sep 17 00:00:00 2001 From: Jorge Bastida Date: Tue, 14 Oct 2014 15:22:26 +0100 Subject: [PATCH 031/250] Initialize create_changed and replace_changed --- cloud/ec2_asg.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/ec2_asg.py b/cloud/ec2_asg.py index 3fc033e6d65..39c9d9c9f41 100755 --- a/cloud/ec2_asg.py +++ b/cloud/ec2_asg.py 
@@ -591,7 +591,7 @@ def main(): module.fail_json(msg="failed to connect to AWS for the given region: %s" % str(region)) except boto.exception.NoAuthHandlerFound, e: module.fail_json(msg=str(e)) - changed = False + changed = create_changed = replace_changed = False if replace_all_instances and replace_instances: module.fail_json(msg="You can't use replace_instances and replace_all_instances in the same task.") if state == 'present': From 043b38f2efa2cb787525bf11da1fb6d28525e69a Mon Sep 17 00:00:00 2001 From: Richard Glew Date: Fri, 17 Oct 2014 09:28:58 +1000 Subject: [PATCH 032/250] Fixed bug #193 where parameter name was set incorrectly --- cloud/ec2_asg.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/cloud/ec2_asg.py b/cloud/ec2_asg.py index 3fc033e6d65..85df6f5d67a 100755 --- a/cloud/ec2_asg.py +++ b/cloud/ec2_asg.py @@ -426,7 +426,7 @@ def replace(connection, module): batch_size = module.params.get('replace_batch_size') wait_timeout = module.params.get('wait_timeout') - group_name = module.params.get('group_name') + group_name = module.params.get('name') max_size = module.params.get('max_size') min_size = module.params.get('min_size') desired_capacity = module.params.get('desired_capacity') @@ -444,7 +444,7 @@ def replace(connection, module): time.sleep(10) if instance_wait <= time.time(): # waiting took too long - module.fail_json(msg = "Waited too for instances to appear. %s" % time.asctime()) + module.fail_json(msg = "Waited too long for instances to appear. %s" % time.asctime()) # determine if we need to continue replaceable = 0 if replace_instances: @@ -470,7 +470,7 @@ def replace(connection, module): props = get_properties(as_group) if wait_timeout <= time.time(): # waiting took too long - module.fail_json(msg = "Waited too for instances to appear. %s" % time.asctime()) + module.fail_json(msg = "Waited too long for instances to appear. 
%s" % time.asctime()) instances = props['instances'] if replace_instances: instances = replace_instances @@ -490,7 +490,7 @@ def replace(connection, module): def replace_batch(connection, module, replace_instances): - group_name = module.params.get('group_name') + group_name = module.params.get('name') wait_timeout = int(module.params.get('wait_timeout')) lc_check = module.params.get('lc_check') From 044cef84191290613afd45e9b7c60d8626a366fd Mon Sep 17 00:00:00 2001 From: Tim Gerla Date: Fri, 17 Oct 2014 10:47:17 -0700 Subject: [PATCH 033/250] Fix a typo of a function call --- cloud/ec2.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/ec2.py b/cloud/ec2.py index a4776c74b83..a82b6a6b637 100644 --- a/cloud/ec2.py +++ b/cloud/ec2.py @@ -501,7 +501,7 @@ def _set_none_to_blank(dictionary): result = dictionary for k in result.iterkeys(): if type(result[k]) == dict: - result[k] = _set_non_to_blank(result[k]) + result[k] = _set_none_to_blank(result[k]) elif not result[k]: result[k] = "" return result From 88b73afcbe15a0c5ddbbb9c977e7c09199e47733 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 20 Oct 2014 22:25:18 -0400 Subject: [PATCH 034/250] Use original_basename to set the dest path even if src is not set --- files/file.py | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/files/file.py b/files/file.py index ff9feb41ee3..be7b05aaf52 100644 --- a/files/file.py +++ b/files/file.py @@ -168,14 +168,6 @@ def main(): # or copy module, even if this module never uses it, it is needed to key off some things if src is not None: src = os.path.expanduser(src) - - # original_basename is used by other modules that depend on file. 
- if os.path.isdir(path) and state not in ["link", "absent"]: - if params['original_basename']: - basename = params['original_basename'] - else: - basename = os.path.basename(src) - params['path'] = path = os.path.join(path, basename) else: if state in ['link','hard']: if follow: @@ -184,6 +176,16 @@ def main(): else: module.fail_json(msg='src and dest are required for creating links') + # original_basename is used by other modules that depend on file. + if os.path.isdir(path) and state not in ["link", "absent"]: + basename = None + if params['original_basename']: + basename = params['original_basename'] + elif src is not None: + basename = os.path.basename(src) + if basename: + params['path'] = path = os.path.join(path, basename) + # make sure the target path is a directory when we're doing a recursive operation recurse = params['recurse'] if recurse and state != 'directory': From 617eed6556b47c4eab4be2c308eecfb7e3e556cf Mon Sep 17 00:00:00 2001 From: CptLausebaer Date: Tue, 21 Oct 2014 18:51:13 +0200 Subject: [PATCH 035/250] correct requirement of parameter dest The parameter "dest:" is required, but it is documented as "required: false". --- source_control/git.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source_control/git.py b/source_control/git.py index a5d94e3dbbe..5ef10b440b0 100644 --- a/source_control/git.py +++ b/source_control/git.py @@ -33,7 +33,7 @@ options: description: - git, SSH, or HTTP protocol address of the git repository. dest: - required: false + required: true description: - Absolute path of where the repository should be checked out to. 
This parameter is required, unless C(update) is set to C(no) From 7f611468a8279d785af26852ca7dccc95bc73a41 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 22 Oct 2014 16:43:35 -0400 Subject: [PATCH 036/250] Fix for systemd service scripts with newlines Fixes #127 --- system/service.py | 28 +++++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/system/service.py b/system/service.py index b235ee25c57..6093717bcee 100644 --- a/system/service.py +++ b/system/service.py @@ -501,7 +501,33 @@ class LinuxService(Service): (rc, out, err) = self.execute_command("%s show %s" % (self.enable_cmd, self.__systemd_unit,)) if rc != 0: self.module.fail_json(msg='failure %d running systemctl show for %r: %s' % (rc, self.__systemd_unit, err)) - return dict(line.split('=', 1) for line in out.splitlines()) + key = None + value_buffer = [] + status_dict = {} + for line in out.splitlines(): + if not key: + key, value = line.split('=', 1) + # systemd fields that are shell commands can be multi-line + # We take a value that begins with a "{" as the start of + # a shell command and a line that ends with "}" as the end of + # the command + if value.lstrip().startswith('{'): + if value.rstrip().endswith('}'): + status_dict[key] = value + key = None + else: + value_buffer.append(value) + else: + status_dict[key] = value + key = None + else: + if line.rstrip().endswith('}'): + status_dict[key] = '\n'.join(value_buffer) + key = None + else: + value_buffer.append(value) + + return status_dict def get_systemd_service_status(self): d = self.get_systemd_status_dict() From 0a6561f5d4ecfbb970d2e6f6d0ee0bb8266e8987 Mon Sep 17 00:00:00 2001 From: Jaanus Torp Date: Thu, 23 Oct 2014 13:19:23 +0100 Subject: [PATCH 037/250] Fixed really annoying omission in the example without the task would complete without errors or warnings but delete all egress rules --- cloud/ec2_group.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cloud/ec2_group.py 
b/cloud/ec2_group.py index 1c8aa701015..250095a9dcb 100644 --- a/cloud/ec2_group.py +++ b/cloud/ec2_group.py @@ -102,6 +102,7 @@ EXAMPLES = ''' - proto: tcp from_port: 80 to_port: 80 + cidr_ip: 0.0.0.0/0 group_name: example-other # description to use if example-other needs to be created group_desc: other example EC2 group @@ -339,7 +340,7 @@ def main(): cidr_ip=ip) changed = True elif vpc_id and not module.check_mode: - # when using a vpc, but no egress rules are specified, + # when using a vpc, but no egress rules are specified, # we add in a default allow all out rule, which was the # default behavior before egress rules were added default_egress_rule = 'out--1-None-None-None-0.0.0.0/0' From f4ca909d59b7733db106a0f32854d4b0f7a1bddb Mon Sep 17 00:00:00 2001 From: Adam Chainz Date: Thu, 23 Oct 2014 17:21:52 +0100 Subject: [PATCH 038/250] Update syntax for cloudformation example --- cloud/cloudformation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/cloudformation.py b/cloud/cloudformation.py index 6a7838a51b2..162c8d8cd16 100644 --- a/cloud/cloudformation.py +++ b/cloud/cloudformation.py @@ -97,7 +97,7 @@ EXAMPLES = ''' # Basic task example tasks: - name: launch ansible cloudformation example - action: cloudformation > + cloudformation: stack_name="ansible-cloudformation" state=present region=us-east-1 disable_rollback=true template=files/cloudformation-example.json From eae233efe40feb05b82425d88d74d0cd72bbb1d1 Mon Sep 17 00:00:00 2001 From: Ashish Ranjan Date: Fri, 24 Oct 2014 14:48:26 +0000 Subject: [PATCH 039/250] fields in /proc/net/tcp* are not always delimited by single space --- utilities/wait_for.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utilities/wait_for.py b/utilities/wait_for.py index 5e02712ddff..2d624282678 100644 --- a/utilities/wait_for.py +++ b/utilities/wait_for.py @@ -228,7 +228,7 @@ class LinuxTCPConnectionInfo(TCPConnectionInfo): active_connections = 0 f = 
open(self.source_file[self.family]) for tcp_connection in f.readlines(): - tcp_connection = tcp_connection.strip().split(' ') + tcp_connection = tcp_connection.strip().split() if tcp_connection[self.local_address_field] == 'local_address': continue if tcp_connection[self.connection_state_field] not in self.connection_states: From 142fd9b06df3b484af864ab22fa01b57590e08b6 Mon Sep 17 00:00:00 2001 From: Lorin Hochstein Date: Sun, 26 Oct 2014 16:26:33 -0400 Subject: [PATCH 040/250] ec2_snapshot: document wait, wait_timeout params Document the wait and wait_timeout params for ec2_snapshot. This is important because snapshots can take a long time to complete, and the module defaults to wait=yes. --- cloud/ec2_snapshot.py | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/cloud/ec2_snapshot.py b/cloud/ec2_snapshot.py index a37aadb95e2..4c21ae6ff7b 100644 --- a/cloud/ec2_snapshot.py +++ b/cloud/ec2_snapshot.py @@ -48,7 +48,20 @@ options: - a hash/dictionary of tags to add to the snapshot required: false version_added: "1.6" - + wait: description: + - wait for the snapshot to be ready + choices: ['yes', 'no'] + required: false + default: yes + version_added: "1.5.1" + wait_timeout: description: + - how long before wait gives up, in seconds + - specify 0 to wait forever + required: false + default: 0 + version_added: "1.5.1" author: Will Thames extends_documentation_fragment: aws ''' From 9fe5c2af2dcfb125398475e4ed0b740e71d70709 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 28 Oct 2014 15:10:10 -0400 Subject: [PATCH 041/250] Update description, parameter name, and default value as discussed in #55 --- source_control/git.py | 35 +++++++++++++++++++---------------- 1 file changed, 19 insertions(+), 16 deletions(-) diff --git a/source_control/git.py b/source_control/git.py index c30ad42099d..c6fbfc569d6 100644 --- a/source_control/git.py +++ b/source_control/git.py @@ -129,15 +129,18 @@ options: - if C(no), repository will be 
cloned without the --recursive option, skipping sub-modules. - track_submodule_branches: + track_submodules: required: false - default: "yes" + default: "no" choices: ["yes", "no"] - version_added: "1.7" + version_added: "1.8" description: - - if C(no), submodules will be updated without the --remote - option, allowing submodules to be tracked by commit hash - instead of branch name. + - if C(yes), submodules will track the latest commit on their + master branch (or other branch specified in .gitmodules). If + C(no), submodules will be kept at the revision specified by the + main project. This is equivalent to specifying the --remote flag + to git submodule update. + notes: - "If the task seems to be hanging, first verify remote host is in C(known_hosts). SSH will prompt user to authorize the first contact with a remote host. To avoid this prompt, @@ -409,7 +412,7 @@ def get_head_branch(git_path, module, dest, remote, bare=False): f.close() return branch -def fetch(git_path, module, repo, dest, version, remote, bare, track_submodule_branches): +def fetch(git_path, module, repo, dest, version, remote, bare, track_submodules): ''' updates repo from remote sources ''' (rc, out0, err0) = module.run_command([git_path, 'remote', 'set-url', remote, repo], cwd=dest) if rc != 0: @@ -427,10 +430,10 @@ def fetch(git_path, module, repo, dest, version, remote, bare, track_submodule_b (rc, out2, err2) = module.run_command("%s fetch --tags %s" % (git_path, remote), cwd=dest) if rc != 0: module.fail_json(msg="Failed to download remote objects and refs") - (rc, out3, err3) = submodule_update(git_path, module, dest, track_submodule_branches ) + (rc, out3, err3) = submodule_update(git_path, module, dest, track_submodules ) return (rc, out1 + out2 + out3, err1 + err2 + err3) -def submodule_update(git_path, module, dest, track_submodule_branches): +def submodule_update(git_path, module, dest, track_submodules): ''' init and update any submodules ''' # get the valid submodule params @@ 
-441,7 +444,7 @@ def submodule_update(git_path, module, dest, track_submodule_branches): return (0, '', '') cmd = [ git_path, 'submodule', 'sync' ] (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest) - if 'remote' in params and track_submodule_branches: + if 'remote' in params and track_submodules: cmd = [ git_path, 'submodule', 'update', '--init', '--recursive' ,'--remote' ] else: cmd = [ git_path, 'submodule', 'update', '--init', '--recursive' ] @@ -451,7 +454,7 @@ def submodule_update(git_path, module, dest, track_submodule_branches): return (rc, out, err) -def switch_version(git_path, module, dest, remote, version, recursive, track_submodule_branches): +def switch_version(git_path, module, dest, remote, version, recursive, track_submodules): cmd = '' if version != 'HEAD': if is_remote_branch(git_path, module, dest, remote, version): @@ -477,7 +480,7 @@ def switch_version(git_path, module, dest, remote, version, recursive, track_sub else: module.fail_json(msg="Failed to checkout branch %s" % (branch)) if recursive: - (rc, out2, err2) = submodule_update(git_path, module, dest, track_submodule_branches) + (rc, out2, err2) = submodule_update(git_path, module, dest, track_submodules) out1 += out2 err1 += err1 return (rc, out1, err1) @@ -501,7 +504,7 @@ def main(): executable=dict(default=None), bare=dict(default='no', type='bool'), recursive=dict(default='yes', type='bool'), - track_submodule_branches=dict(default='yes', type='bool'), + track_submodules=dict(default='no', type='bool'), ), supports_check_mode=True ) @@ -546,7 +549,7 @@ def main(): add_git_host_key(module, repo, accept_hostkey=module.params['accept_hostkey']) recursive = module.params['recursive'] - track_submodule_branches = module.params['track_submodule_branches'] + track_submodules = module.params['track_submodules'] rc, out, err, status = (0, None, None, None) @@ -592,12 +595,12 @@ def main(): module.exit_json(changed=False, before=before, after=remote_head) if module.check_mode: 
module.exit_json(changed=True, before=before, after=remote_head) - fetch(git_path, module, repo, dest, version, remote, bare, track_submodule_branches) + fetch(git_path, module, repo, dest, version, remote, bare, track_submodules) # switch to version specified regardless of whether # we cloned or pulled if not bare: - switch_version(git_path, module, dest, remote, version, recursive, track_submodule_branches) + switch_version(git_path, module, dest, remote, version, recursive, track_submodules) # determine if we changed anything after = get_version(module, git_path, dest) From 63e81cfc2e0c3c07245342cd41a0ba147eac55be Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 28 Oct 2014 17:57:37 -0400 Subject: [PATCH 042/250] Fix git module handling of the recursive flag Fixes: #169 --- source_control/git.py | 120 ++++++++++++++++++++++++++++++++++++++---- 1 file changed, 110 insertions(+), 10 deletions(-) diff --git a/source_control/git.py b/source_control/git.py index c6fbfc569d6..998e684afb1 100644 --- a/source_control/git.py +++ b/source_control/git.py @@ -249,6 +249,28 @@ def get_version(module, git_path, dest, ref="HEAD"): sha = stdout.rstrip('\n') return sha +def get_submodule_versions(git_path, module, dest, version='HEAD'): + cmd = [git_path, 'submodule', 'foreach', git_path, 'rev-parse', version] + (rc, out, err) = module.run_command(cmd, cwd=dest) + if rc != 0: + module.fail_json(msg='Unable to determine hashes of submodules') + submodules = {} + subm_name = None + for line in out.splitlines(): + if line.startswith("Entering '"): + subm_name = line[10:-1] + elif len(line.strip()) == 40: + if subm_name is None: + module.fail_json() + submodules[subm_name] = line.strip() + subm_name = None + else: + module.fail_json(msg='Unable to parse submodule hash line: %s' % line.strip()) + if subm_name is not None: + module.fail_json(msg='Unable to find hash for submodule: %s' % subm_name) + + return submodules + def clone(git_path, module, repo, dest, remote, depth, 
version, bare, reference, recursive): ''' makes a new git repo if it does not already exist ''' @@ -412,8 +434,10 @@ def get_head_branch(git_path, module, dest, remote, bare=False): f.close() return branch -def fetch(git_path, module, repo, dest, version, remote, bare, track_submodules): +def fetch(git_path, module, repo, dest, version, remote, bare, track_submodules, recursive): ''' updates repo from remote sources ''' + out_acc = [] + err_acc = [] (rc, out0, err0) = module.run_command([git_path, 'remote', 'set-url', remote, repo], cwd=dest) if rc != 0: module.fail_json(msg="Failed to set a new url %s for %s: %s" % (repo, remote, out0 + err0)) @@ -423,6 +447,8 @@ def fetch(git_path, module, repo, dest, version, remote, bare, track_submodules) (rc, out1, err1) = module.run_command("%s fetch %s" % (git_path, remote), cwd=dest) if rc != 0: module.fail_json(msg="Failed to download remote objects and refs") + out_acc.append(out1) + err_acc.append(err1) if bare: (rc, out2, err2) = module.run_command([git_path, 'fetch', remote, '+refs/tags/*:refs/tags/*'], cwd=dest) @@ -430,8 +456,63 @@ def fetch(git_path, module, repo, dest, version, remote, bare, track_submodules) (rc, out2, err2) = module.run_command("%s fetch --tags %s" % (git_path, remote), cwd=dest) if rc != 0: module.fail_json(msg="Failed to download remote objects and refs") - (rc, out3, err3) = submodule_update(git_path, module, dest, track_submodules ) - return (rc, out1 + out2 + out3, err1 + err2 + err3) + out_acc.append(out2) + err_acc.append(err2) + + if recursive: + (rc, out3, err3) = submodule_update(git_path, module, dest, track_submodules) + if rc != 0: + module.fail_json(msg="Failed to update submodules: %s" % "".join(out3, err3)) + out_acc.append(out3) + err_acc.append(err3) + return (rc, ''.join(out_acc), ''.join(err_acc)) + + +def submodules_fetch(git_path, module, remote, track_submodules, dest): + changed = False + + if not os.path.exists(os.path.join(dest, '.gitmodules')): + # no submodules + 
return changed + + # Check for new submodules + gitmodules_file = open(os.path.join(dest, '.gitmodules'), 'r') + for line in gitmodules_file: + if line.strip().startswith('path'): + path = line.split('=', 1)[1].strip() + # Check that dest/path/.git exists + if not os.path.exists(os.path.join(dest, path, '.git')): + changed = True + break + + # Check for updates to existing modules + if not changed: + # Fetch updates + begin = get_submodule_versions(git_path, module, dest) + cmd = [git_path, 'submodule', 'foreach', git_path, 'fetch'] + (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest) + if rc != 0: + module.fail_json(msg="Failed to fetch submodules: %s" % out + err) + + if track_submodules: + # Compare against submodule HEAD + ### FIXME: determine this from .gitmodules + version = 'master' + after = get_submodule_versions(git_path, module, dest, '%s/%s' + % (remote, version)) + if begin != after: + changed = True + else: + # Compare against the superproject's expectation + cmd = [git_path, 'submodule', 'status'] + (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest) + if rc != 0: + module.fail_json(msg='Failed to retrieve submodule status: %s' % out + err) + for line in out.splitlines(): + if line[0] != ' ': + changed = True + break + return changed def submodule_update(git_path, module, dest, track_submodules): ''' init and update any submodules ''' @@ -555,6 +636,7 @@ def main(): before = None local_mods = False + repo_updated = None if gitconfig and not os.path.exists(gitconfig) or not gitconfig and not update: # if there is no git configuration, do a clone operation unless the # user requested no updates or we're doing a check mode test (in @@ -590,24 +672,42 @@ def main(): elif is_remote_tag(git_path, module, dest, repo, version): # if the remote is a tag and we have the tag locally, exit early if version in get_tags(git_path, module, dest): - module.exit_json(changed=False, before=before, after=remote_head) + repo_updated = False 
else: - module.exit_json(changed=False, before=before, after=remote_head) - if module.check_mode: - module.exit_json(changed=True, before=before, after=remote_head) - fetch(git_path, module, repo, dest, version, remote, bare, track_submodules) + repo_updated = False + if repo_updated is not False: + if module.check_mode: + module.exit_json(changed=True, before=before, after=remote_head) + fetch(git_path, module, repo, dest, version, remote, bare, track_submodules, recursive) + repo_updated = True # switch to version specified regardless of whether # we cloned or pulled - if not bare: + if repo_updated and not bare: switch_version(git_path, module, dest, remote, version, recursive, track_submodules) + # Deal with submodules + if recursive and not bare: + submodules_updated = submodules_fetch(git_path, module, remote, track_submodules, dest) + + if module.check_mode: + if submodules_updated: + module.exit_json(changed=True, before=before, after=remote_head, submodules_changed=True) + else: + module.exit_json(changed=False, before=before, after=remote_head) + + if submodules_updated: + # Switch to version specified + submodule_update(git_path, module, dest, track_submodules) + # determine if we changed anything after = get_version(module, git_path, dest) - changed = False + changed = False if before != after or local_mods: changed = True + elif recursive and submodules_updated: + changed =True # cleanup the wrapper script if ssh_wrapper: From eac316800c4076244aee579f7bb445bb9590fedd Mon Sep 17 00:00:00 2001 From: IndyMichaelB Date: Fri, 31 Oct 2014 16:13:41 -0400 Subject: [PATCH 043/250] docfix for vsphere_guest.py Corrected parameter name from user to username in documentation --- cloud/vsphere_guest.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/vsphere_guest.py b/cloud/vsphere_guest.py index a91a8199dda..f2149253ef6 100644 --- a/cloud/vsphere_guest.py +++ b/cloud/vsphere_guest.py @@ -38,7 +38,7 @@ options: description: - The virtual 
server name you wish to manage. required: true - user: + username: description: - Username to connect to vcenter as. required: true From 3fe48db44f87781222fe9c511da3b53fb6e71f28 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 3 Nov 2014 09:43:26 -0600 Subject: [PATCH 044/250] Fail in ec2 if exact_count is specified without using count_tag Fixes ansible/ansible#9431 --- cloud/ec2.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/cloud/ec2.py b/cloud/ec2.py index a82b6a6b637..dccd0668939 100644 --- a/cloud/ec2.py +++ b/cloud/ec2.py @@ -660,6 +660,11 @@ def enforce_count(module, ec2): count_tag = module.params.get('count_tag') zone = module.params.get('zone') + # fail here if the exact count was specified without filtering + # on a tag, as this may lead to a undesired removal of instances + if exact_count and count_tag is None: + module.fail_json(msg="you must use the 'count_tag' option with exact_count") + reservations, instances = find_running_instances_by_count_tag(module, ec2, count_tag, zone) changed = None From 0c4adcb2c65d9385a94381df3d53b986cdd49ef3 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 4 Nov 2014 11:18:09 -0500 Subject: [PATCH 045/250] made subcategories for cloud modules for better organization --- cloud/{ => amazon}/cloudformation.py | 0 cloud/{ => amazon}/ec2.py | 0 cloud/{ => amazon}/ec2_ami.py | 0 cloud/{ => amazon}/ec2_ami_search.py | 0 cloud/{ => amazon}/ec2_asg.py | 0 cloud/{ => amazon}/ec2_eip.py | 0 cloud/{ => amazon}/ec2_elb.py | 0 cloud/{ => amazon}/ec2_elb_lb.py | 0 cloud/{ => amazon}/ec2_facts.py | 0 cloud/{ => amazon}/ec2_group.py | 0 cloud/{ => amazon}/ec2_key.py | 0 cloud/{ => amazon}/ec2_lc.py | 0 cloud/{ => amazon}/ec2_metric_alarm.py | 0 cloud/{ => amazon}/ec2_scaling_policy.py | 0 cloud/{ => amazon}/ec2_snapshot.py | 0 cloud/{ => amazon}/ec2_tag.py | 0 cloud/{ => amazon}/ec2_vol.py | 0 cloud/{ => amazon}/ec2_vpc.py | 0 cloud/{ => amazon}/elasticache.py | 0 cloud/{ => amazon}/rds.py | 0 cloud/{ => 
amazon}/rds_param_group.py | 0 cloud/{ => amazon}/rds_subnet_group.py | 0 cloud/{ => amazon}/route53.py | 0 cloud/{ => amazon}/s3.py | 0 cloud/{ => azure}/azure.py | 0 cloud/{ => digital_ocean}/digital_ocean.py | 0 cloud/{ => digital_ocean}/digital_ocean_domain.py | 0 cloud/{ => digital_ocean}/digital_ocean_sshkey.py | 0 cloud/{ => docker}/docker.py | 0 cloud/{ => docker}/docker_image.py | 0 cloud/{ => google}/gc_storage.py | 0 cloud/{ => google}/gce.py | 0 cloud/{ => google}/gce_lb.py | 0 cloud/{ => google}/gce_net.py | 0 cloud/{ => google}/gce_pd.py | 0 cloud/{ => linode}/linode.py | 0 cloud/{ => openstack}/glance_image.py | 0 cloud/{ => openstack}/keystone_user.py | 0 cloud/{ => openstack}/nova_compute.py | 0 cloud/{ => openstack}/nova_keypair.py | 0 cloud/{ => openstack}/quantum_floating_ip.py | 0 cloud/{ => openstack}/quantum_floating_ip_associate.py | 0 cloud/{ => openstack}/quantum_network.py | 0 cloud/{ => openstack}/quantum_router.py | 0 cloud/{ => openstack}/quantum_router_gateway.py | 0 cloud/{ => openstack}/quantum_router_interface.py | 0 cloud/{ => openstack}/quantum_subnet.py | 0 cloud/{ => rackspace}/rax.py | 0 cloud/{ => rackspace}/rax_cbs.py | 0 cloud/{ => rackspace}/rax_cbs_attachments.py | 0 cloud/{ => rackspace}/rax_cdb.py | 0 cloud/{ => rackspace}/rax_cdb_database.py | 0 cloud/{ => rackspace}/rax_cdb_user.py | 0 cloud/{ => rackspace}/rax_clb.py | 0 cloud/{ => rackspace}/rax_clb_nodes.py | 0 cloud/{ => rackspace}/rax_dns.py | 0 cloud/{ => rackspace}/rax_dns_record.py | 0 cloud/{ => rackspace}/rax_facts.py | 0 cloud/{ => rackspace}/rax_files.py | 0 cloud/{ => rackspace}/rax_files_objects.py | 0 cloud/{ => rackspace}/rax_identity.py | 0 cloud/{ => rackspace}/rax_keypair.py | 0 cloud/{ => rackspace}/rax_meta.py | 0 cloud/{ => rackspace}/rax_network.py | 0 cloud/{ => rackspace}/rax_queue.py | 0 cloud/{ => rackspace}/rax_scaling_group.py | 0 cloud/{ => rackspace}/rax_scaling_policy.py | 0 cloud/{ => vmware}/vsphere_guest.py | 0 68 files changed, 0 
insertions(+), 0 deletions(-) rename cloud/{ => amazon}/cloudformation.py (100%) rename cloud/{ => amazon}/ec2.py (100%) rename cloud/{ => amazon}/ec2_ami.py (100%) rename cloud/{ => amazon}/ec2_ami_search.py (100%) rename cloud/{ => amazon}/ec2_asg.py (100%) rename cloud/{ => amazon}/ec2_eip.py (100%) rename cloud/{ => amazon}/ec2_elb.py (100%) rename cloud/{ => amazon}/ec2_elb_lb.py (100%) rename cloud/{ => amazon}/ec2_facts.py (100%) rename cloud/{ => amazon}/ec2_group.py (100%) rename cloud/{ => amazon}/ec2_key.py (100%) rename cloud/{ => amazon}/ec2_lc.py (100%) rename cloud/{ => amazon}/ec2_metric_alarm.py (100%) rename cloud/{ => amazon}/ec2_scaling_policy.py (100%) rename cloud/{ => amazon}/ec2_snapshot.py (100%) rename cloud/{ => amazon}/ec2_tag.py (100%) rename cloud/{ => amazon}/ec2_vol.py (100%) rename cloud/{ => amazon}/ec2_vpc.py (100%) rename cloud/{ => amazon}/elasticache.py (100%) rename cloud/{ => amazon}/rds.py (100%) rename cloud/{ => amazon}/rds_param_group.py (100%) rename cloud/{ => amazon}/rds_subnet_group.py (100%) rename cloud/{ => amazon}/route53.py (100%) rename cloud/{ => amazon}/s3.py (100%) rename cloud/{ => azure}/azure.py (100%) rename cloud/{ => digital_ocean}/digital_ocean.py (100%) rename cloud/{ => digital_ocean}/digital_ocean_domain.py (100%) rename cloud/{ => digital_ocean}/digital_ocean_sshkey.py (100%) rename cloud/{ => docker}/docker.py (100%) rename cloud/{ => docker}/docker_image.py (100%) rename cloud/{ => google}/gc_storage.py (100%) rename cloud/{ => google}/gce.py (100%) rename cloud/{ => google}/gce_lb.py (100%) rename cloud/{ => google}/gce_net.py (100%) rename cloud/{ => google}/gce_pd.py (100%) rename cloud/{ => linode}/linode.py (100%) rename cloud/{ => openstack}/glance_image.py (100%) rename cloud/{ => openstack}/keystone_user.py (100%) rename cloud/{ => openstack}/nova_compute.py (100%) rename cloud/{ => openstack}/nova_keypair.py (100%) rename cloud/{ => openstack}/quantum_floating_ip.py (100%) rename cloud/{ 
=> openstack}/quantum_floating_ip_associate.py (100%) rename cloud/{ => openstack}/quantum_network.py (100%) rename cloud/{ => openstack}/quantum_router.py (100%) rename cloud/{ => openstack}/quantum_router_gateway.py (100%) rename cloud/{ => openstack}/quantum_router_interface.py (100%) rename cloud/{ => openstack}/quantum_subnet.py (100%) rename cloud/{ => rackspace}/rax.py (100%) rename cloud/{ => rackspace}/rax_cbs.py (100%) rename cloud/{ => rackspace}/rax_cbs_attachments.py (100%) rename cloud/{ => rackspace}/rax_cdb.py (100%) rename cloud/{ => rackspace}/rax_cdb_database.py (100%) rename cloud/{ => rackspace}/rax_cdb_user.py (100%) rename cloud/{ => rackspace}/rax_clb.py (100%) rename cloud/{ => rackspace}/rax_clb_nodes.py (100%) rename cloud/{ => rackspace}/rax_dns.py (100%) rename cloud/{ => rackspace}/rax_dns_record.py (100%) rename cloud/{ => rackspace}/rax_facts.py (100%) rename cloud/{ => rackspace}/rax_files.py (100%) rename cloud/{ => rackspace}/rax_files_objects.py (100%) rename cloud/{ => rackspace}/rax_identity.py (100%) rename cloud/{ => rackspace}/rax_keypair.py (100%) rename cloud/{ => rackspace}/rax_meta.py (100%) rename cloud/{ => rackspace}/rax_network.py (100%) rename cloud/{ => rackspace}/rax_queue.py (100%) rename cloud/{ => rackspace}/rax_scaling_group.py (100%) rename cloud/{ => rackspace}/rax_scaling_policy.py (100%) rename cloud/{ => vmware}/vsphere_guest.py (100%) diff --git a/cloud/cloudformation.py b/cloud/amazon/cloudformation.py similarity index 100% rename from cloud/cloudformation.py rename to cloud/amazon/cloudformation.py diff --git a/cloud/ec2.py b/cloud/amazon/ec2.py similarity index 100% rename from cloud/ec2.py rename to cloud/amazon/ec2.py diff --git a/cloud/ec2_ami.py b/cloud/amazon/ec2_ami.py similarity index 100% rename from cloud/ec2_ami.py rename to cloud/amazon/ec2_ami.py diff --git a/cloud/ec2_ami_search.py b/cloud/amazon/ec2_ami_search.py similarity index 100% rename from cloud/ec2_ami_search.py rename to 
cloud/amazon/ec2_ami_search.py diff --git a/cloud/ec2_asg.py b/cloud/amazon/ec2_asg.py similarity index 100% rename from cloud/ec2_asg.py rename to cloud/amazon/ec2_asg.py diff --git a/cloud/ec2_eip.py b/cloud/amazon/ec2_eip.py similarity index 100% rename from cloud/ec2_eip.py rename to cloud/amazon/ec2_eip.py diff --git a/cloud/ec2_elb.py b/cloud/amazon/ec2_elb.py similarity index 100% rename from cloud/ec2_elb.py rename to cloud/amazon/ec2_elb.py diff --git a/cloud/ec2_elb_lb.py b/cloud/amazon/ec2_elb_lb.py similarity index 100% rename from cloud/ec2_elb_lb.py rename to cloud/amazon/ec2_elb_lb.py diff --git a/cloud/ec2_facts.py b/cloud/amazon/ec2_facts.py similarity index 100% rename from cloud/ec2_facts.py rename to cloud/amazon/ec2_facts.py diff --git a/cloud/ec2_group.py b/cloud/amazon/ec2_group.py similarity index 100% rename from cloud/ec2_group.py rename to cloud/amazon/ec2_group.py diff --git a/cloud/ec2_key.py b/cloud/amazon/ec2_key.py similarity index 100% rename from cloud/ec2_key.py rename to cloud/amazon/ec2_key.py diff --git a/cloud/ec2_lc.py b/cloud/amazon/ec2_lc.py similarity index 100% rename from cloud/ec2_lc.py rename to cloud/amazon/ec2_lc.py diff --git a/cloud/ec2_metric_alarm.py b/cloud/amazon/ec2_metric_alarm.py similarity index 100% rename from cloud/ec2_metric_alarm.py rename to cloud/amazon/ec2_metric_alarm.py diff --git a/cloud/ec2_scaling_policy.py b/cloud/amazon/ec2_scaling_policy.py similarity index 100% rename from cloud/ec2_scaling_policy.py rename to cloud/amazon/ec2_scaling_policy.py diff --git a/cloud/ec2_snapshot.py b/cloud/amazon/ec2_snapshot.py similarity index 100% rename from cloud/ec2_snapshot.py rename to cloud/amazon/ec2_snapshot.py diff --git a/cloud/ec2_tag.py b/cloud/amazon/ec2_tag.py similarity index 100% rename from cloud/ec2_tag.py rename to cloud/amazon/ec2_tag.py diff --git a/cloud/ec2_vol.py b/cloud/amazon/ec2_vol.py similarity index 100% rename from cloud/ec2_vol.py rename to cloud/amazon/ec2_vol.py diff --git 
a/cloud/ec2_vpc.py b/cloud/amazon/ec2_vpc.py similarity index 100% rename from cloud/ec2_vpc.py rename to cloud/amazon/ec2_vpc.py diff --git a/cloud/elasticache.py b/cloud/amazon/elasticache.py similarity index 100% rename from cloud/elasticache.py rename to cloud/amazon/elasticache.py diff --git a/cloud/rds.py b/cloud/amazon/rds.py similarity index 100% rename from cloud/rds.py rename to cloud/amazon/rds.py diff --git a/cloud/rds_param_group.py b/cloud/amazon/rds_param_group.py similarity index 100% rename from cloud/rds_param_group.py rename to cloud/amazon/rds_param_group.py diff --git a/cloud/rds_subnet_group.py b/cloud/amazon/rds_subnet_group.py similarity index 100% rename from cloud/rds_subnet_group.py rename to cloud/amazon/rds_subnet_group.py diff --git a/cloud/route53.py b/cloud/amazon/route53.py similarity index 100% rename from cloud/route53.py rename to cloud/amazon/route53.py diff --git a/cloud/s3.py b/cloud/amazon/s3.py similarity index 100% rename from cloud/s3.py rename to cloud/amazon/s3.py diff --git a/cloud/azure.py b/cloud/azure/azure.py similarity index 100% rename from cloud/azure.py rename to cloud/azure/azure.py diff --git a/cloud/digital_ocean.py b/cloud/digital_ocean/digital_ocean.py similarity index 100% rename from cloud/digital_ocean.py rename to cloud/digital_ocean/digital_ocean.py diff --git a/cloud/digital_ocean_domain.py b/cloud/digital_ocean/digital_ocean_domain.py similarity index 100% rename from cloud/digital_ocean_domain.py rename to cloud/digital_ocean/digital_ocean_domain.py diff --git a/cloud/digital_ocean_sshkey.py b/cloud/digital_ocean/digital_ocean_sshkey.py similarity index 100% rename from cloud/digital_ocean_sshkey.py rename to cloud/digital_ocean/digital_ocean_sshkey.py diff --git a/cloud/docker.py b/cloud/docker/docker.py similarity index 100% rename from cloud/docker.py rename to cloud/docker/docker.py diff --git a/cloud/docker_image.py b/cloud/docker/docker_image.py similarity index 100% rename from 
cloud/docker_image.py rename to cloud/docker/docker_image.py diff --git a/cloud/gc_storage.py b/cloud/google/gc_storage.py similarity index 100% rename from cloud/gc_storage.py rename to cloud/google/gc_storage.py diff --git a/cloud/gce.py b/cloud/google/gce.py similarity index 100% rename from cloud/gce.py rename to cloud/google/gce.py diff --git a/cloud/gce_lb.py b/cloud/google/gce_lb.py similarity index 100% rename from cloud/gce_lb.py rename to cloud/google/gce_lb.py diff --git a/cloud/gce_net.py b/cloud/google/gce_net.py similarity index 100% rename from cloud/gce_net.py rename to cloud/google/gce_net.py diff --git a/cloud/gce_pd.py b/cloud/google/gce_pd.py similarity index 100% rename from cloud/gce_pd.py rename to cloud/google/gce_pd.py diff --git a/cloud/linode.py b/cloud/linode/linode.py similarity index 100% rename from cloud/linode.py rename to cloud/linode/linode.py diff --git a/cloud/glance_image.py b/cloud/openstack/glance_image.py similarity index 100% rename from cloud/glance_image.py rename to cloud/openstack/glance_image.py diff --git a/cloud/keystone_user.py b/cloud/openstack/keystone_user.py similarity index 100% rename from cloud/keystone_user.py rename to cloud/openstack/keystone_user.py diff --git a/cloud/nova_compute.py b/cloud/openstack/nova_compute.py similarity index 100% rename from cloud/nova_compute.py rename to cloud/openstack/nova_compute.py diff --git a/cloud/nova_keypair.py b/cloud/openstack/nova_keypair.py similarity index 100% rename from cloud/nova_keypair.py rename to cloud/openstack/nova_keypair.py diff --git a/cloud/quantum_floating_ip.py b/cloud/openstack/quantum_floating_ip.py similarity index 100% rename from cloud/quantum_floating_ip.py rename to cloud/openstack/quantum_floating_ip.py diff --git a/cloud/quantum_floating_ip_associate.py b/cloud/openstack/quantum_floating_ip_associate.py similarity index 100% rename from cloud/quantum_floating_ip_associate.py rename to cloud/openstack/quantum_floating_ip_associate.py diff 
--git a/cloud/quantum_network.py b/cloud/openstack/quantum_network.py similarity index 100% rename from cloud/quantum_network.py rename to cloud/openstack/quantum_network.py diff --git a/cloud/quantum_router.py b/cloud/openstack/quantum_router.py similarity index 100% rename from cloud/quantum_router.py rename to cloud/openstack/quantum_router.py diff --git a/cloud/quantum_router_gateway.py b/cloud/openstack/quantum_router_gateway.py similarity index 100% rename from cloud/quantum_router_gateway.py rename to cloud/openstack/quantum_router_gateway.py diff --git a/cloud/quantum_router_interface.py b/cloud/openstack/quantum_router_interface.py similarity index 100% rename from cloud/quantum_router_interface.py rename to cloud/openstack/quantum_router_interface.py diff --git a/cloud/quantum_subnet.py b/cloud/openstack/quantum_subnet.py similarity index 100% rename from cloud/quantum_subnet.py rename to cloud/openstack/quantum_subnet.py diff --git a/cloud/rax.py b/cloud/rackspace/rax.py similarity index 100% rename from cloud/rax.py rename to cloud/rackspace/rax.py diff --git a/cloud/rax_cbs.py b/cloud/rackspace/rax_cbs.py similarity index 100% rename from cloud/rax_cbs.py rename to cloud/rackspace/rax_cbs.py diff --git a/cloud/rax_cbs_attachments.py b/cloud/rackspace/rax_cbs_attachments.py similarity index 100% rename from cloud/rax_cbs_attachments.py rename to cloud/rackspace/rax_cbs_attachments.py diff --git a/cloud/rax_cdb.py b/cloud/rackspace/rax_cdb.py similarity index 100% rename from cloud/rax_cdb.py rename to cloud/rackspace/rax_cdb.py diff --git a/cloud/rax_cdb_database.py b/cloud/rackspace/rax_cdb_database.py similarity index 100% rename from cloud/rax_cdb_database.py rename to cloud/rackspace/rax_cdb_database.py diff --git a/cloud/rax_cdb_user.py b/cloud/rackspace/rax_cdb_user.py similarity index 100% rename from cloud/rax_cdb_user.py rename to cloud/rackspace/rax_cdb_user.py diff --git a/cloud/rax_clb.py b/cloud/rackspace/rax_clb.py similarity index 100% 
rename from cloud/rax_clb.py rename to cloud/rackspace/rax_clb.py diff --git a/cloud/rax_clb_nodes.py b/cloud/rackspace/rax_clb_nodes.py similarity index 100% rename from cloud/rax_clb_nodes.py rename to cloud/rackspace/rax_clb_nodes.py diff --git a/cloud/rax_dns.py b/cloud/rackspace/rax_dns.py similarity index 100% rename from cloud/rax_dns.py rename to cloud/rackspace/rax_dns.py diff --git a/cloud/rax_dns_record.py b/cloud/rackspace/rax_dns_record.py similarity index 100% rename from cloud/rax_dns_record.py rename to cloud/rackspace/rax_dns_record.py diff --git a/cloud/rax_facts.py b/cloud/rackspace/rax_facts.py similarity index 100% rename from cloud/rax_facts.py rename to cloud/rackspace/rax_facts.py diff --git a/cloud/rax_files.py b/cloud/rackspace/rax_files.py similarity index 100% rename from cloud/rax_files.py rename to cloud/rackspace/rax_files.py diff --git a/cloud/rax_files_objects.py b/cloud/rackspace/rax_files_objects.py similarity index 100% rename from cloud/rax_files_objects.py rename to cloud/rackspace/rax_files_objects.py diff --git a/cloud/rax_identity.py b/cloud/rackspace/rax_identity.py similarity index 100% rename from cloud/rax_identity.py rename to cloud/rackspace/rax_identity.py diff --git a/cloud/rax_keypair.py b/cloud/rackspace/rax_keypair.py similarity index 100% rename from cloud/rax_keypair.py rename to cloud/rackspace/rax_keypair.py diff --git a/cloud/rax_meta.py b/cloud/rackspace/rax_meta.py similarity index 100% rename from cloud/rax_meta.py rename to cloud/rackspace/rax_meta.py diff --git a/cloud/rax_network.py b/cloud/rackspace/rax_network.py similarity index 100% rename from cloud/rax_network.py rename to cloud/rackspace/rax_network.py diff --git a/cloud/rax_queue.py b/cloud/rackspace/rax_queue.py similarity index 100% rename from cloud/rax_queue.py rename to cloud/rackspace/rax_queue.py diff --git a/cloud/rax_scaling_group.py b/cloud/rackspace/rax_scaling_group.py similarity index 100% rename from cloud/rax_scaling_group.py 
rename to cloud/rackspace/rax_scaling_group.py diff --git a/cloud/rax_scaling_policy.py b/cloud/rackspace/rax_scaling_policy.py similarity index 100% rename from cloud/rax_scaling_policy.py rename to cloud/rackspace/rax_scaling_policy.py diff --git a/cloud/vsphere_guest.py b/cloud/vmware/vsphere_guest.py similarity index 100% rename from cloud/vsphere_guest.py rename to cloud/vmware/vsphere_guest.py From 3ed1378067d447b5235ff22972a3a6f3fced6747 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Tue, 4 Nov 2014 17:23:22 -0500 Subject: [PATCH 046/250] Some more module categorization. --- database/mysql/__init__.py | 0 database/{ => mysql}/mysql_db.py | 0 database/{ => mysql}/mysql_user.py | 0 database/{ => mysql}/mysql_variables.py | 0 database/postgresql/__init__.py | 0 database/{ => postgresql}/postgresql_db.py | 0 database/{ => postgresql}/postgresql_privs.py | 0 database/{ => postgresql}/postgresql_user.py | 0 packaging/language/__init__.py | 0 packaging/{ => language}/easy_install.py | 0 packaging/{ => language}/gem.py | 0 packaging/{ => language}/pip.py | 0 packaging/os/__init__.py | 0 packaging/{ => os}/apt.py | 0 packaging/{ => os}/apt_key.py | 0 packaging/{ => os}/apt_repository.py | 0 packaging/{ => os}/apt_rpm.py | 0 packaging/{ => os}/redhat_subscription.py | 0 packaging/{ => os}/rhn_channel.py | 0 packaging/{ => os}/rhn_register.py | 0 packaging/{ => os}/rpm_key.py | 0 packaging/{ => os}/yum.py | 0 22 files changed, 0 insertions(+), 0 deletions(-) create mode 100644 database/mysql/__init__.py rename database/{ => mysql}/mysql_db.py (100%) rename database/{ => mysql}/mysql_user.py (100%) rename database/{ => mysql}/mysql_variables.py (100%) create mode 100644 database/postgresql/__init__.py rename database/{ => postgresql}/postgresql_db.py (100%) rename database/{ => postgresql}/postgresql_privs.py (100%) rename database/{ => postgresql}/postgresql_user.py (100%) create mode 100644 packaging/language/__init__.py rename packaging/{ => 
language}/easy_install.py (100%) rename packaging/{ => language}/gem.py (100%) rename packaging/{ => language}/pip.py (100%) create mode 100644 packaging/os/__init__.py rename packaging/{ => os}/apt.py (100%) rename packaging/{ => os}/apt_key.py (100%) rename packaging/{ => os}/apt_repository.py (100%) rename packaging/{ => os}/apt_rpm.py (100%) rename packaging/{ => os}/redhat_subscription.py (100%) rename packaging/{ => os}/rhn_channel.py (100%) rename packaging/{ => os}/rhn_register.py (100%) rename packaging/{ => os}/rpm_key.py (100%) rename packaging/{ => os}/yum.py (100%) diff --git a/database/mysql/__init__.py b/database/mysql/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/database/mysql_db.py b/database/mysql/mysql_db.py similarity index 100% rename from database/mysql_db.py rename to database/mysql/mysql_db.py diff --git a/database/mysql_user.py b/database/mysql/mysql_user.py similarity index 100% rename from database/mysql_user.py rename to database/mysql/mysql_user.py diff --git a/database/mysql_variables.py b/database/mysql/mysql_variables.py similarity index 100% rename from database/mysql_variables.py rename to database/mysql/mysql_variables.py diff --git a/database/postgresql/__init__.py b/database/postgresql/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/database/postgresql_db.py b/database/postgresql/postgresql_db.py similarity index 100% rename from database/postgresql_db.py rename to database/postgresql/postgresql_db.py diff --git a/database/postgresql_privs.py b/database/postgresql/postgresql_privs.py similarity index 100% rename from database/postgresql_privs.py rename to database/postgresql/postgresql_privs.py diff --git a/database/postgresql_user.py b/database/postgresql/postgresql_user.py similarity index 100% rename from database/postgresql_user.py rename to database/postgresql/postgresql_user.py diff --git a/packaging/language/__init__.py b/packaging/language/__init__.py new file 
mode 100644 index 00000000000..e69de29bb2d diff --git a/packaging/easy_install.py b/packaging/language/easy_install.py similarity index 100% rename from packaging/easy_install.py rename to packaging/language/easy_install.py diff --git a/packaging/gem.py b/packaging/language/gem.py similarity index 100% rename from packaging/gem.py rename to packaging/language/gem.py diff --git a/packaging/pip.py b/packaging/language/pip.py similarity index 100% rename from packaging/pip.py rename to packaging/language/pip.py diff --git a/packaging/os/__init__.py b/packaging/os/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/packaging/apt.py b/packaging/os/apt.py similarity index 100% rename from packaging/apt.py rename to packaging/os/apt.py diff --git a/packaging/apt_key.py b/packaging/os/apt_key.py similarity index 100% rename from packaging/apt_key.py rename to packaging/os/apt_key.py diff --git a/packaging/apt_repository.py b/packaging/os/apt_repository.py similarity index 100% rename from packaging/apt_repository.py rename to packaging/os/apt_repository.py diff --git a/packaging/apt_rpm.py b/packaging/os/apt_rpm.py similarity index 100% rename from packaging/apt_rpm.py rename to packaging/os/apt_rpm.py diff --git a/packaging/redhat_subscription.py b/packaging/os/redhat_subscription.py similarity index 100% rename from packaging/redhat_subscription.py rename to packaging/os/redhat_subscription.py diff --git a/packaging/rhn_channel.py b/packaging/os/rhn_channel.py similarity index 100% rename from packaging/rhn_channel.py rename to packaging/os/rhn_channel.py diff --git a/packaging/rhn_register.py b/packaging/os/rhn_register.py similarity index 100% rename from packaging/rhn_register.py rename to packaging/os/rhn_register.py diff --git a/packaging/rpm_key.py b/packaging/os/rpm_key.py similarity index 100% rename from packaging/rpm_key.py rename to packaging/os/rpm_key.py diff --git a/packaging/yum.py b/packaging/os/yum.py similarity index 100% rename 
from packaging/yum.py rename to packaging/os/yum.py From 0be8798858da17d670c75ba4760ad141bb56a432 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Tue, 4 Nov 2014 17:29:56 -0500 Subject: [PATCH 047/250] Renames. --- network/basics/__init__.py | 0 network/{ => basics}/get_url.py | 0 network/{ => basics}/slurp.py | 0 network/{ => basics}/uri.py | 0 utilities/{ => helper}/accelerate.py | 0 utilities/{ => helper}/fireball.py | 0 utilities/{ => logic}/assert.py | 0 utilities/{ => logic}/debug.py | 0 utilities/{ => logic}/fail.py | 0 utilities/{ => logic}/include_vars.py | 0 utilities/{ => logic}/pause.py | 0 utilities/{ => logic}/set_fact.py | 0 utilities/{ => logic}/wait_for.py | 0 13 files changed, 0 insertions(+), 0 deletions(-) create mode 100644 network/basics/__init__.py rename network/{ => basics}/get_url.py (100%) rename network/{ => basics}/slurp.py (100%) rename network/{ => basics}/uri.py (100%) rename utilities/{ => helper}/accelerate.py (100%) rename utilities/{ => helper}/fireball.py (100%) rename utilities/{ => logic}/assert.py (100%) rename utilities/{ => logic}/debug.py (100%) rename utilities/{ => logic}/fail.py (100%) rename utilities/{ => logic}/include_vars.py (100%) rename utilities/{ => logic}/pause.py (100%) rename utilities/{ => logic}/set_fact.py (100%) rename utilities/{ => logic}/wait_for.py (100%) diff --git a/network/basics/__init__.py b/network/basics/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/network/get_url.py b/network/basics/get_url.py similarity index 100% rename from network/get_url.py rename to network/basics/get_url.py diff --git a/network/slurp.py b/network/basics/slurp.py similarity index 100% rename from network/slurp.py rename to network/basics/slurp.py diff --git a/network/uri.py b/network/basics/uri.py similarity index 100% rename from network/uri.py rename to network/basics/uri.py diff --git a/utilities/accelerate.py b/utilities/helper/accelerate.py similarity index 100% rename from 
utilities/accelerate.py rename to utilities/helper/accelerate.py diff --git a/utilities/fireball.py b/utilities/helper/fireball.py similarity index 100% rename from utilities/fireball.py rename to utilities/helper/fireball.py diff --git a/utilities/assert.py b/utilities/logic/assert.py similarity index 100% rename from utilities/assert.py rename to utilities/logic/assert.py diff --git a/utilities/debug.py b/utilities/logic/debug.py similarity index 100% rename from utilities/debug.py rename to utilities/logic/debug.py diff --git a/utilities/fail.py b/utilities/logic/fail.py similarity index 100% rename from utilities/fail.py rename to utilities/logic/fail.py diff --git a/utilities/include_vars.py b/utilities/logic/include_vars.py similarity index 100% rename from utilities/include_vars.py rename to utilities/logic/include_vars.py diff --git a/utilities/pause.py b/utilities/logic/pause.py similarity index 100% rename from utilities/pause.py rename to utilities/logic/pause.py diff --git a/utilities/set_fact.py b/utilities/logic/set_fact.py similarity index 100% rename from utilities/set_fact.py rename to utilities/logic/set_fact.py diff --git a/utilities/wait_for.py b/utilities/logic/wait_for.py similarity index 100% rename from utilities/wait_for.py rename to utilities/logic/wait_for.py From cec519f70e96f801c3a5243e96f69fe343cba0dc Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Tue, 4 Nov 2014 17:47:07 -0500 Subject: [PATCH 048/250] Move internal category to utilities to remove one more category from the left hand menu, keeping it concise. 
--- internal/__init__.py | 0 {internal => utilities/logic}/async_status.py | 0 {internal => utilities/logic}/async_wrapper.py | 0 3 files changed, 0 insertions(+), 0 deletions(-) delete mode 100644 internal/__init__.py rename {internal => utilities/logic}/async_status.py (100%) rename {internal => utilities/logic}/async_wrapper.py (100%) diff --git a/internal/__init__.py b/internal/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/internal/async_status.py b/utilities/logic/async_status.py similarity index 100% rename from internal/async_status.py rename to utilities/logic/async_status.py diff --git a/internal/async_wrapper.py b/utilities/logic/async_wrapper.py similarity index 100% rename from internal/async_wrapper.py rename to utilities/logic/async_wrapper.py From 19ed60196c65218f52f08977293bd9325c6569f6 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 5 Nov 2014 11:06:59 -0500 Subject: [PATCH 049/250] added __init__.py to new cloud subcategories so builds work again --- cloud/amazon/__init__.py | 0 cloud/azure/__init__.py | 0 cloud/digital_ocean/__init__.py | 0 cloud/docker/__init__.py | 0 cloud/google/__init__.py | 0 cloud/linode/__init__.py | 0 cloud/openstack/__init__.py | 0 cloud/rackspace/__init__.py | 0 cloud/vmware/__init__.py | 0 9 files changed, 0 insertions(+), 0 deletions(-) create mode 100644 cloud/amazon/__init__.py create mode 100644 cloud/azure/__init__.py create mode 100644 cloud/digital_ocean/__init__.py create mode 100644 cloud/docker/__init__.py create mode 100644 cloud/google/__init__.py create mode 100644 cloud/linode/__init__.py create mode 100644 cloud/openstack/__init__.py create mode 100644 cloud/rackspace/__init__.py create mode 100644 cloud/vmware/__init__.py diff --git a/cloud/amazon/__init__.py b/cloud/amazon/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cloud/azure/__init__.py b/cloud/azure/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git 
a/cloud/digital_ocean/__init__.py b/cloud/digital_ocean/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cloud/docker/__init__.py b/cloud/docker/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cloud/google/__init__.py b/cloud/google/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cloud/linode/__init__.py b/cloud/linode/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cloud/openstack/__init__.py b/cloud/openstack/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cloud/rackspace/__init__.py b/cloud/rackspace/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cloud/vmware/__init__.py b/cloud/vmware/__init__.py new file mode 100644 index 00000000000..e69de29bb2d From c6dcd383550e933b5216f471d8771d3ec6140a23 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 5 Nov 2014 15:49:39 -0500 Subject: [PATCH 050/250] added missing init.py for utilities modules --- utilities/helper/__init__.py | 0 utilities/logic/__init__.py | 0 2 files changed, 0 insertions(+), 0 deletions(-) create mode 100644 utilities/helper/__init__.py create mode 100644 utilities/logic/__init__.py diff --git a/utilities/helper/__init__.py b/utilities/helper/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/utilities/logic/__init__.py b/utilities/logic/__init__.py new file mode 100644 index 00000000000..e69de29bb2d From 2970b339eb8ea6031e6153cabe45459bc2bd5754 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Wed, 5 Nov 2014 16:12:29 -0500 Subject: [PATCH 051/250] Deprecate docker_image, use the docker module to deploy docker images, or shell out to docker to call docker build if you want from a playbook. 
--- cloud/docker/{docker_image.py => _docker_image.py} | 1 + 1 file changed, 1 insertion(+) rename cloud/docker/{docker_image.py => _docker_image.py} (99%) diff --git a/cloud/docker/docker_image.py b/cloud/docker/_docker_image.py similarity index 99% rename from cloud/docker/docker_image.py rename to cloud/docker/_docker_image.py index e1388f20f1a..726895c8df7 100644 --- a/cloud/docker/docker_image.py +++ b/cloud/docker/_docker_image.py @@ -23,6 +23,7 @@ DOCUMENTATION = ''' --- module: docker_image +deprecated: "functions are being rolled into the 'docker' module" author: Pavel Antonov version_added: "1.5" short_description: manage docker images From 81cbdb6c8cf54c41ba2ee3330c968e2feea05a5c Mon Sep 17 00:00:00 2001 From: kustodian Date: Thu, 6 Nov 2014 09:46:54 +0100 Subject: [PATCH 052/250] Fixed postgresql_db failing on Python 2.4 with --check --- database/postgresql/postgresql_db.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/database/postgresql/postgresql_db.py b/database/postgresql/postgresql_db.py index 605be621601..32cc930cd98 100644 --- a/database/postgresql/postgresql_db.py +++ b/database/postgresql/postgresql_db.py @@ -281,14 +281,13 @@ def main(): elif state == "present": changed = not db_matches(cursor, db, owner, template, encoding, lc_collate, lc_ctype) - module.exit_json(changed=changed,db=db) - - if state == "absent": - changed = db_delete(cursor, db) + else: + if state == "absent": + changed = db_delete(cursor, db) - elif state == "present": - changed = db_create(cursor, db, owner, template, encoding, - lc_collate, lc_ctype) + elif state == "present": + changed = db_create(cursor, db, owner, template, encoding, + lc_collate, lc_ctype) except NotSupportedError, e: module.fail_json(msg=str(e)) except Exception, e: From 6317d3a988f7269340cb7a0d105d2c671ca1cd1e Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 6 Nov 2014 21:25:55 -0800 Subject: [PATCH 053/250] Move from md5 to sha1 to work on FIPS-140 enabled 
systems --- files/assemble.py | 21 ++++++++++++++------- files/copy.py | 15 ++++++++++----- files/fetch.py | 5 +++-- files/stat.py | 22 +++++++++++++++++++--- network/basics/get_url.py | 21 ++++++++++++++------- network/basics/uri.py | 14 +++++++------- 6 files changed, 67 insertions(+), 31 deletions(-) diff --git a/files/assemble.py b/files/assemble.py index a16431b9f52..a66c82f432a 100644 --- a/files/assemble.py +++ b/files/assemble.py @@ -153,8 +153,9 @@ def main(): ) changed = False - pathmd5 = None - destmd5 = None + path_md5 = None # Deprecated + path_hash = None + dest_hash = None src = os.path.expanduser(module.params['src']) dest = os.path.expanduser(module.params['dest']) backup = module.params['backup'] @@ -175,23 +176,29 @@ def main(): module.fail_json(msg="Invalid Regexp (%s) in \"%s\"" % (e, regexp)) path = assemble_from_fragments(src, delimiter, compiled_regexp) - pathmd5 = module.md5(path) + path_hash = module.sha1(path) if os.path.exists(dest): - destmd5 = module.md5(dest) + dest_hash = module.sha1(dest) - if pathmd5 != destmd5: - if backup and destmd5 is not None: + if path_hash != dest_hash: + if backup and dest_hash is not None: module.backup_local(dest) shutil.copy(path, dest) changed = True + # Backwards compat. 
This won't return data if FIPS mode is active + try: + pathmd5 = module.md5(path) + except ValueError: + pathmd5 = None + os.remove(path) file_args = module.load_file_common_arguments(module.params) changed = module.set_fs_attributes_if_different(file_args, changed) # Mission complete - module.exit_json(src=src, dest=dest, md5sum=pathmd5, changed=changed, msg="OK") + module.exit_json(src=src, dest=dest, md5sum=pathmd5, checksum=path_hash, changed=changed, msg="OK") # import module snippets from ansible.module_utils.basic import * diff --git a/files/copy.py b/files/copy.py index eff46dae982..9ee8e42c31a 100644 --- a/files/copy.py +++ b/files/copy.py @@ -167,8 +167,13 @@ def main(): if not os.access(src, os.R_OK): module.fail_json(msg="Source %s not readable" % (src)) - md5sum_src = module.md5(src) - md5sum_dest = None + checksum_src = module.sha1(src) + checksum_dest = None + # Backwards compat only. This will be None in FIPS mode + try: + md5sum_src = module.md5(src) + except ValueError: + md5sum_src = None changed = False @@ -198,7 +203,7 @@ def main(): basename = original_basename dest = os.path.join(dest, basename) if os.access(dest, os.R_OK): - md5sum_dest = module.md5(dest) + checksum_dest = module.sha1(dest) else: if not os.path.exists(os.path.dirname(dest)): try: @@ -215,7 +220,7 @@ def main(): module.fail_json(msg="Destination %s not writable" % (os.path.dirname(dest))) backup_file = None - if md5sum_src != md5sum_dest or os.path.islink(dest): + if checksum_src != checksum_dest or os.path.islink(dest): try: if backup: if os.path.exists(dest): @@ -238,7 +243,7 @@ def main(): changed = False res_args = dict( - dest = dest, src = src, md5sum = md5sum_src, changed = changed + dest = dest, src = src, md5sum = md5sum_src, checksum = checksum_src, changed = changed ) if backup_file: res_args['backup_file'] = backup_file diff --git a/files/fetch.py b/files/fetch.py index 5b47d87a856..fd631e6ebe6 100644 --- a/files/fetch.py +++ b/files/fetch.py @@ -34,13 +34,14 @@ 
options: required: false choices: [ "yes", "no" ] default: "no" - validate_md5: + validate_checksum: version_added: "1.4" description: - - Verify that the source and destination md5sums match after the files are fetched. + - Verify that the source and destination checksums match after the files are fetched. required: false choices: [ "yes", "no" ] default: "yes" + aliases: [ "validate_md5" ] flat: version_added: "1.2" description: diff --git a/files/stat.py b/files/stat.py index fe8096516b7..644dc105fe8 100644 --- a/files/stat.py +++ b/files/stat.py @@ -36,10 +36,17 @@ options: aliases: [] get_md5: description: - - Whether to return the md5 sum of the file + - Whether to return the md5 sum of the file. Will return None if we're unable to use md5 (Common for FIPS-140 compliant systems) required: false default: yes aliases: [] + get_checksum: + description: + - Whether to return a checksum of the file (currently sha1) + required: false + default: yes + aliases: [] + version_added: "1.8" author: Bruce Pennypacker ''' @@ -72,7 +79,8 @@ def main(): argument_spec = dict( path = dict(required=True), follow = dict(default='no', type='bool'), - get_md5 = dict(default='yes', type='bool') + get_md5 = dict(default='yes', type='bool'), + get_checksum = dict(default='yes', type='bool') ), supports_check_mode = True ) @@ -81,6 +89,7 @@ def main(): path = os.path.expanduser(path) follow = module.params.get('follow') get_md5 = module.params.get('get_md5') + get_checksum = module.params.get('get_checksum') try: if follow: @@ -133,7 +142,14 @@ def main(): d['lnk_source'] = os.path.realpath(path) if S_ISREG(mode) and get_md5 and os.access(path,os.R_OK): - d['md5'] = module.md5(path) + # Will fail on FIPS-140 compliant systems + try: + d['md5'] = module.md5(path) + except ValueError: + d['md5'] = None + + if S_ISREG(mode) and get_checksum and os.access(path,os.R_OK): + d['checksum'] = module.sha1(path) try: diff --git a/network/basics/get_url.py b/network/basics/get_url.py index 
c3b81129a27..b0d27859420 100644 --- a/network/basics/get_url.py +++ b/network/basics/get_url.py @@ -154,7 +154,7 @@ def url_get(module, url, dest, use_proxy, last_mod_time, force, timeout=10): if info['status'] == 304: module.exit_json(url=url, dest=dest, changed=False, msg=info.get('msg', '')) - # create a temporary file and copy content to do md5-based replacement + # create a temporary file and copy content to do checksum-based replacement if info['status'] != 200: module.fail_json(msg="Request failed", status_code=info['status'], response=info['msg'], url=url, dest=dest) @@ -241,8 +241,8 @@ def main(): filename = url_filename(info['url']) dest = os.path.join(dest, filename) - md5sum_src = None - md5sum_dest = None + checksum_src = None + checksum_dest = None # raise an error if there is no tmpsrc file if not os.path.exists(tmpsrc): @@ -251,7 +251,7 @@ def main(): if not os.access(tmpsrc, os.R_OK): os.remove(tmpsrc) module.fail_json( msg="Source %s not readable" % (tmpsrc)) - md5sum_src = module.md5(tmpsrc) + checksum_src = module.sha1(tmpsrc) # check if there is no dest file if os.path.exists(dest): @@ -262,13 +262,13 @@ def main(): if not os.access(dest, os.R_OK): os.remove(tmpsrc) module.fail_json( msg="Destination %s not readable" % (dest)) - md5sum_dest = module.md5(dest) + checksum_dest = module.sha1(dest) else: if not os.access(os.path.dirname(dest), os.W_OK): os.remove(tmpsrc) module.fail_json( msg="Destination %s not writable" % (os.path.dirname(dest))) - if md5sum_src != md5sum_dest: + if checksum_src != checksum_dest: try: shutil.copyfile(tmpsrc, dest) except Exception, err: @@ -303,8 +303,15 @@ def main(): file_args['path'] = dest changed = module.set_fs_attributes_if_different(file_args, changed) + # Backwards compat only. 
We'll return None on FIPS enabled systems + try: + md5sum = module.md5(dest) + except ValueError: + md5sum = None + # Mission complete - module.exit_json(url=url, dest=dest, src=tmpsrc, md5sum=md5sum_src, + + module.exit_json(url=url, dest=dest, src=tmpsrc, md5sum=md5sum, checksum=checksum_src, sha256sum=sha256sum, changed=changed, msg=info.get('msg', '')) # import module snippets diff --git a/network/basics/uri.py b/network/basics/uri.py index 8d62463df72..95bf5c705fe 100644 --- a/network/basics/uri.py +++ b/network/basics/uri.py @@ -194,8 +194,8 @@ def write_file(module, url, dest, content): module.fail_json(msg="failed to create temporary content file: %s" % str(err)) f.close() - md5sum_src = None - md5sum_dest = None + checksum_src = None + checksum_dest = None # raise an error if there is no tmpsrc file if not os.path.exists(tmpsrc): @@ -204,7 +204,7 @@ def write_file(module, url, dest, content): if not os.access(tmpsrc, os.R_OK): os.remove(tmpsrc) module.fail_json( msg="Source %s not readable" % (tmpsrc)) - md5sum_src = module.md5(tmpsrc) + checksum_src = module.sha1(tmpsrc) # check if there is no dest file if os.path.exists(dest): @@ -215,19 +215,19 @@ def write_file(module, url, dest, content): if not os.access(dest, os.R_OK): os.remove(tmpsrc) module.fail_json( msg="Destination %s not readable" % (dest)) - md5sum_dest = module.md5(dest) + checksum_dest = module.sha1(dest) else: if not os.access(os.path.dirname(dest), os.W_OK): os.remove(tmpsrc) module.fail_json( msg="Destination dir %s not writable" % (os.path.dirname(dest))) - - if md5sum_src != md5sum_dest: + + if checksum_src != checksum_dest: try: shutil.copyfile(tmpsrc, dest) except Exception, err: os.remove(tmpsrc) module.fail_json(msg="failed to copy %s to %s: %s" % (tmpsrc, dest, str(err))) - + os.remove(tmpsrc) From 6cb8a5f3123e5aa8f415aef6ff5dedefeb94b417 Mon Sep 17 00:00:00 2001 From: Fwiffo Date: Fri, 7 Nov 2014 15:20:49 -0800 Subject: [PATCH 054/250] typo in comments: of -> or --- 
cloud/vmware/vsphere_guest.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index a91a8199dda..54f844310f8 100644 --- a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -207,7 +207,7 @@ EXAMPLES = ''' hw_product_uuid: "ef50bac8-2845-40ff-81d9-675315501dac" # Remove a vm from vSphere -# The VM must be powered_off of you need to use force to force a shutdown +# The VM must be powered_off or you need to use force to force a shutdown - vsphere_guest: vcenter_hostname: vcenter.mydomain.local From e4b696890127e88d9fbd616d829a0b3f5515e731 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 8 Nov 2014 12:08:47 -0500 Subject: [PATCH 055/250] made hostname work with python 2.4 --- system/hostname.py | 32 ++++++++++++++++++-------------- 1 file changed, 18 insertions(+), 14 deletions(-) diff --git a/system/hostname.py b/system/hostname.py index a426b59136b..48311f07a96 100755 --- a/system/hostname.py +++ b/system/hostname.py @@ -298,31 +298,35 @@ class OpenRCStrategy(GenericStrategy): def get_permanent_hostname(self): try: - with open(self.HOSTNAME_FILE, 'r') as f: - for line in f: - line = line.strip() - if line.startswith('hostname='): - return line[10:].strip('"') - return None + f = open(self.HOSTNAME_FILE, 'r') + for line in f: + line = line.strip() + if line.startswith('hostname='): + return line[10:].strip('"') except Exception, err: - self.module.fail_json(msg="failed to read hostname: %s" % - str(err)) + self.module.fail_json(msg="failed to read hostname: %s" % str(err)) + finally: + f.close() + + return None def set_permanent_hostname(self, name): try: - with open(self.HOSTNAME_FILE, 'r') as f: - lines = [x.strip() for x in f] + f = open(self.HOSTNAME_FILE, 'r') + lines = [x.strip() for x in f] for i, line in enumerate(lines): if line.startswith('hostname='): lines[i] = 'hostname="%s"' % name break + f.close() - with open(self.HOSTNAME_FILE, 'w') as f: - 
f.write('\n'.join(lines) + '\n') + f = open(self.HOSTNAME_FILE, 'w') + f.write('\n'.join(lines) + '\n') except Exception, err: - self.module.fail_json(msg="failed to update hostname: %s" % - str(err)) + self.module.fail_json(msg="failed to update hostname: %s" % str(err)) + finally: + f.close() # =========================================== From 2dee52616dae041f197bcfb3c62350f7f2f7344a Mon Sep 17 00:00:00 2001 From: Francois Deppierraz Date: Mon, 10 Nov 2014 09:30:29 +0100 Subject: [PATCH 056/250] Fix a typo in nova_compute documentation string Without this patch, ansible-doc was failing this way: $ ansible-doc nova_compute Traceback (most recent call last): File "/home/francois/WORK/dev/ansible/bin/ansible-doc", line 324, in main() File "/home/francois/WORK/dev/ansible/bin/ansible-doc", line 316, in main text += get_man_text(doc) File "/home/francois/WORK/dev/ansible/bin/ansible-doc", line 112, in get_man_text desc = " ".join(opt['description']) KeyError: 'description' --- cloud/openstack/nova_compute.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/openstack/nova_compute.py b/cloud/openstack/nova_compute.py index 42c54753fb8..97488ea2e61 100644 --- a/cloud/openstack/nova_compute.py +++ b/cloud/openstack/nova_compute.py @@ -124,7 +124,7 @@ options: default: 'yes' version_added: "1.8" floating_ips: - decription: + description: - list of valid floating IPs that pre-exist to assign to this node required: false default: None From 03cf57c6f0939b60a55364dd1697716f344459de Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jose=CC=81=20Moreira?= Date: Mon, 10 Nov 2014 11:24:31 +0000 Subject: [PATCH 057/250] Fixed small typo on ec2 task name --- cloud/amazon/ec2.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) mode change 100644 => 100755 cloud/amazon/ec2.py diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py old mode 100644 new mode 100755 index dccd0668939..04e419ea1f1 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -403,7 +403,7 @@ 
local_action: - 'i-xxxxxx' region: us-east-1 tasks: - - name: Stop the sanbox instances + - name: Stop the sandbox instances local_action: module: ec2 instance_ids: '{{ instance_ids }}' From 5257222f3385429bfcf9f0a847d826860fcb9e6e Mon Sep 17 00:00:00 2001 From: "Martijn P. Rijkeboer" Date: Mon, 10 Nov 2014 20:00:28 +0100 Subject: [PATCH 058/250] Fix whitespace around '=' in sysctl.conf. --- system/sysctl.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/system/sysctl.py b/system/sysctl.py index acf6395f071..979051e1f8e 100644 --- a/system/sysctl.py +++ b/system/sysctl.py @@ -278,10 +278,10 @@ class SysctlModule(object): checked.append(k) if k == self.args['name']: if self.args['state'] == "present": - new_line = "%s = %s\n" % (k, self.args['value']) + new_line = "%s=%s\n" % (k, self.args['value']) self.fixed_lines.append(new_line) else: - new_line = "%s = %s\n" % (k, v) + new_line = "%s=%s\n" % (k, v) self.fixed_lines.append(new_line) if self.args['name'] not in checked and self.args['state'] == "present": From 6db6cd219e5afaf5e77dd4a1d25453f7356746e0 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Mon, 10 Nov 2014 23:45:27 -0600 Subject: [PATCH 059/250] Use new prompt detection in run_command to detect aptitude prompts Also adds flags to aptitude command when force=yes is used, in order to bypass the prompts given for untrusted packages. 
Fixes #31 --- packaging/os/apt.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/packaging/os/apt.py b/packaging/os/apt.py index e5a38e538d1..834c644ef1d 100755 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -387,6 +387,7 @@ def upgrade(m, mode="yes", force=False, default_release=None, check_arg = '' apt_cmd = None + prompt_regex = None if mode == "dist": # apt-get dist-upgrade apt_cmd = APT_GET_CMD @@ -399,12 +400,13 @@ def upgrade(m, mode="yes", force=False, default_release=None, # aptitude safe-upgrade # mode=yes # default apt_cmd = APTITUDE_CMD upgrade_command = "safe-upgrade" + prompt_regex = r"(^Do you want to ignore this warning and proceed anyway\?|^\*\*\*.*\[default=.*\])" if force: if apt_cmd == APT_GET_CMD: force_yes = '--force-yes' else: - force_yes = '' + force_yes = '--assume-yes --allow-untrusted' else: force_yes = '' @@ -419,7 +421,7 @@ def upgrade(m, mode="yes", force=False, default_release=None, if default_release: cmd += " -t '%s'" % (default_release,) - rc, out, err = m.run_command(cmd) + rc, out, err = m.run_command(cmd, prompt_regex=prompt_regex) if rc: m.fail_json(msg="'%s %s' failed: %s" % (apt_cmd, upgrade_command, err), stdout=out) if (apt_cmd == APT_GET_CMD and APT_GET_ZERO in out) or (apt_cmd == APTITUDE_CMD and APTITUDE_ZERO in out): From 83669d7edaa798ef79c94500fffa8b86a113482d Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 11 Nov 2014 09:39:03 -0500 Subject: [PATCH 060/250] switched to iterator as per comments --- system/hostname.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/hostname.py b/system/hostname.py index 48311f07a96..95b28dbf69d 100755 --- a/system/hostname.py +++ b/system/hostname.py @@ -313,7 +313,7 @@ class OpenRCStrategy(GenericStrategy): def set_permanent_hostname(self, name): try: f = open(self.HOSTNAME_FILE, 'r') - lines = [x.strip() for x in f] + lines = (x.strip() for x in f) for i, line in enumerate(lines): if line.startswith('hostname='): From 
b9471c9cd597f23cfbb9b0eebf8a8da543495d25 Mon Sep 17 00:00:00 2001 From: Jeff Geerling Date: Tue, 11 Nov 2014 12:22:27 -0600 Subject: [PATCH 061/250] Add mention of fetch module to the copy module docs. --- files/copy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/copy.py b/files/copy.py index 9ee8e42c31a..ab480bec1f9 100644 --- a/files/copy.py +++ b/files/copy.py @@ -27,7 +27,7 @@ module: copy version_added: "historical" short_description: Copies files to remote locations. description: - - The M(copy) module copies a file on the local box to remote locations. + - The M(copy) module copies a file on the local box to remote locations. Use the M(fetch) module to copy files from remote locations to the local box. options: src: description: From d67ac4d33e054ea15ff83d2111c98a1127f53439 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 11 Nov 2014 15:09:42 -0500 Subject: [PATCH 062/250] minor fixes to wait_for to avoid tracebacks as per ansible core issue #9244 --- utilities/logic/wait_for.py | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/utilities/logic/wait_for.py b/utilities/logic/wait_for.py index 2d624282678..88e821cfdb5 100644 --- a/utilities/logic/wait_for.py +++ b/utilities/logic/wait_for.py @@ -170,7 +170,7 @@ class TCPConnectionInfo(object): def _get_exclude_ips(self): if self.module.params['exclude_hosts'] is None: return [] - exclude_hosts = self.module.params['exclude_hosts'].split(',') + exclude_hosts = self.module.params['exclude_hosts'] return [ _convert_host_to_hex(h)[1] for h in exclude_hosts ] def get_active_connections_count(self): @@ -221,7 +221,7 @@ class LinuxTCPConnectionInfo(TCPConnectionInfo): def _get_exclude_ips(self): if self.module.params['exclude_hosts'] is None: return [] - exclude_hosts = self.module.params['exclude_hosts'].split(',') + exclude_hosts = self.module.params['exclude_hosts'] return [ _convert_host_to_hex(h) for h in exclude_hosts ] def 
get_active_connections_count(self): @@ -305,7 +305,7 @@ def main(): path=dict(default=None), search_regex=dict(default=None), state=dict(default='started', choices=['started', 'stopped', 'present', 'absent', 'drained']), - exclude_hosts=dict(default=None, type='list') + exclude_hosts=dict(default=None) ), ) @@ -322,20 +322,18 @@ def main(): state = params['state'] path = params['path'] search_regex = params['search_regex'] - if params['exclude_hosts']: - exclude_hosts = params['exclude_hosts'].split(',') - else: - exclude_hosts = [] - + if isinstance(params['exclude_hosts'], basestring): + params['exclude_hosts'] = params['exclude_hosts'].split(',') + if port and path: module.fail_json(msg="port and path parameter can not both be passed to wait_for") if path and state == 'stopped': module.fail_json(msg="state=stopped should only be used for checking a port in the wait_for module") if path and state == 'drained': module.fail_json(msg="state=drained should only be used for checking a port in the wait_for module") - if exclude_hosts and state != 'drained': + if params['exclude_hosts'] is not None and state != 'drained': module.fail_json(msg="exclude_hosts should only be with state=drained") - + start = datetime.datetime.now() if delay: From 5af992899641fe33a42e01c38a189576b2c58c5e Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 11 Nov 2014 15:34:55 -0500 Subject: [PATCH 063/250] let implied type do the spliting --- utilities/logic/wait_for.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/utilities/logic/wait_for.py b/utilities/logic/wait_for.py index 88e821cfdb5..ae316fe1a17 100644 --- a/utilities/logic/wait_for.py +++ b/utilities/logic/wait_for.py @@ -305,7 +305,7 @@ def main(): path=dict(default=None), search_regex=dict(default=None), state=dict(default='started', choices=['started', 'stopped', 'present', 'absent', 'drained']), - exclude_hosts=dict(default=None) + exclude_hosts=dict(default=None, type='list') ), ) @@ -322,8 +322,6 @@ def 
main(): state = params['state'] path = params['path'] search_regex = params['search_regex'] - if isinstance(params['exclude_hosts'], basestring): - params['exclude_hosts'] = params['exclude_hosts'].split(',') if port and path: module.fail_json(msg="port and path parameter can not both be passed to wait_for") From f287600e6c76d3eeb2a5f7609f2b00f1937ea93d Mon Sep 17 00:00:00 2001 From: James Martin Date: Tue, 11 Nov 2014 19:47:34 -0500 Subject: [PATCH 064/250] fixes #240, #274, #108 --- cloud/amazon/ec2_asg.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_asg.py b/cloud/amazon/ec2_asg.py index 0c327fd133a..0e1ff259354 100755 --- a/cloud/amazon/ec2_asg.py +++ b/cloud/amazon/ec2_asg.py @@ -199,7 +199,7 @@ except ImportError: ASG_ATTRIBUTES = ('availability_zones', 'default_cooldown', 'desired_capacity', 'health_check_period', 'health_check_type', 'launch_config_name', 'load_balancers', 'max_size', 'min_size', 'name', 'placement_group', - 'tags', 'termination_policies', 'vpc_zone_identifier') + 'termination_policies', 'vpc_zone_identifier') INSTANCE_ATTRIBUTES = ('instance_id', 'health_status', 'lifecycle_state', 'launch_config_name') From d5326e6dbda4a7d2886f8d7e6cf36811ba7146c5 Mon Sep 17 00:00:00 2001 From: James Martin Date: Wed, 12 Nov 2014 11:48:06 -0500 Subject: [PATCH 065/250] Removes orphaned code. 
--- cloud/amazon/ec2_asg.py | 20 -------------------- 1 file changed, 20 deletions(-) diff --git a/cloud/amazon/ec2_asg.py b/cloud/amazon/ec2_asg.py index 0e1ff259354..4940d0d9dcd 100755 --- a/cloud/amazon/ec2_asg.py +++ b/cloud/amazon/ec2_asg.py @@ -373,26 +373,6 @@ def create_autoscaling_group(connection, module): module.fail_json(msg=str(e)) - result = as_groups[0] - module.exit_json(changed=changed, name=result.name, - autoscaling_group_arn=result.autoscaling_group_arn, - availability_zones=result.availability_zones, - created_time=str(result.created_time), - default_cooldown=result.default_cooldown, - health_check_period=result.health_check_period, - health_check_type=result.health_check_type, - instance_id=result.instance_id, - instances=[instance.instance_id for instance in result.instances], - launch_config_name=result.launch_config_name, - load_balancers=result.load_balancers, - min_size=result.min_size, max_size=result.max_size, - placement_group=result.placement_group, - wait_timeout = dict(default=300), - tags=result.tags, - termination_policies=result.termination_policies, - vpc_zone_identifier=result.vpc_zone_identifier) - - def delete_autoscaling_group(connection, module): group_name = module.params.get('name') groups = connection.get_all_groups(names=[group_name]) From ae744f98d874e80f966b3a7a02f882bedc1d06d9 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Wed, 12 Nov 2014 11:04:43 -0600 Subject: [PATCH 066/250] Serialize tag attributes to ASG properties in ec2_asg Fixes #108 --- cloud/amazon/ec2_asg.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/cloud/amazon/ec2_asg.py b/cloud/amazon/ec2_asg.py index 0e1ff259354..830f5bdb92e 100755 --- a/cloud/amazon/ec2_asg.py +++ b/cloud/amazon/ec2_asg.py @@ -245,6 +245,10 @@ def get_properties(autoscaling_group): properties['pending_instances'] += 1 properties['instance_facts'] = instance_facts properties['load_balancers'] = autoscaling_group.load_balancers + + if hasattr(autoscaling_group, 
"tags"): + properties['tags'] = dict((t.key, t.value) for t in autoscaling_group.tags) + return properties @@ -357,6 +361,7 @@ def create_autoscaling_group(connection, module): continue if changed: connection.create_or_update_tags(asg_tags) + as_group.tags = asg_tags # handle loadbalancers separately because None != [] load_balancers = module.params.get('load_balancers') or [] From 341c7ce30745b0a957f63ca57cf2c0d50d4d48e7 Mon Sep 17 00:00:00 2001 From: James Martin Date: Wed, 12 Nov 2014 12:16:53 -0500 Subject: [PATCH 067/250] asg will now be terminated if state=absent. adds mutually exclusive options --- cloud/amazon/ec2_asg.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/cloud/amazon/ec2_asg.py b/cloud/amazon/ec2_asg.py index 4940d0d9dcd..7fcd332fc4b 100755 --- a/cloud/amazon/ec2_asg.py +++ b/cloud/amazon/ec2_asg.py @@ -557,9 +557,13 @@ def main(): tags=dict(type='list', default=[]), health_check_period=dict(type='int', default=300), health_check_type=dict(default='EC2', choices=['EC2', 'ELB']), - ) + ), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive = [['replace_all_instances', 'replace_instances']] ) - module = AnsibleModule(argument_spec=argument_spec) state = module.params.get('state') replace_instances = module.params.get('replace_instances') @@ -572,15 +576,15 @@ def main(): except boto.exception.NoAuthHandlerFound, e: module.fail_json(msg=str(e)) changed = create_changed = replace_changed = False - if replace_all_instances and replace_instances: - module.fail_json(msg="You can't use replace_instances and replace_all_instances in the same task.") + + if state == 'present': create_changed, asg_properties=create_autoscaling_group(connection, module) - if replace_all_instances or replace_instances: - replace_changed, asg_properties=replace(connection, module) elif state == 'absent': changed = delete_autoscaling_group(connection, module) module.exit_json( changed = changed ) + if 
replace_all_instances or replace_instances: + replace_changed, asg_properties=replace(connection, module) if create_changed or replace_changed: changed = True module.exit_json( changed = changed, **asg_properties ) From 5356ccbb35c03b3b56029b2d6f3710916e3d7db6 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 12 Nov 2014 14:20:21 -0500 Subject: [PATCH 068/250] make sure 'present' is fully equivalent to 'installed' and remove installed from feedback in favor of 'present' --- packaging/os/apt.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/packaging/os/apt.py b/packaging/os/apt.py index 834c644ef1d..867522badbe 100755 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -431,7 +431,7 @@ def main(): module = AnsibleModule( argument_spec = dict( - state = dict(default='installed', choices=['installed', 'latest', 'removed', 'absent', 'present']), + state = dict(default='present', choices=['installed', 'latest', 'removed', 'absent', 'present']), update_cache = dict(default=False, aliases=['update-cache'], type='bool'), cache_valid_time = dict(type='int'), purge = dict(default=False, type='bool'), @@ -519,8 +519,8 @@ def main(): p['default_release'], dpkg_options) if p['deb']: - if p['state'] != "installed": - module.fail_json(msg="deb only supports state=installed") + if p['state'] not in ["installed", "present"]: + module.fail_json(msg="deb only supports state=present") install_deb(module, p['deb'], cache, install_recommends=install_recommends, force=force_yes, dpkg_options=p['dpkg_options']) From bbebdefacb749bc9b5203da9b96a98e1dac3354b Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 12 Nov 2014 17:01:14 -0500 Subject: [PATCH 069/250] better handling of deprecated aliases --- packaging/os/apt.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/packaging/os/apt.py b/packaging/os/apt.py index 867522badbe..e7d2ce4485c 100755 ---
a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -469,6 +469,12 @@ def main(): install_recommends = p['install_recommends'] dpkg_options = expand_dpkg_options(p['dpkg_options']) + # Deal with deprecated aliases + if p['state'] == 'installed': + p['state'] = 'present' + if p['state'] == 'removed': + p['state'] = 'absent' + try: cache = apt.Cache() if p['default_release']: @@ -519,7 +525,7 @@ def main(): p['default_release'], dpkg_options) if p['deb']: - if p['state'] not in ["installed", "present"]: + if p['state'] == 'present': module.fail_json(msg="deb only supports state=present") install_deb(module, p['deb'], cache, install_recommends=install_recommends, @@ -543,7 +549,7 @@ def main(): module.exit_json(**retvals) else: module.fail_json(**retvals) - elif p['state'] in [ 'installed', 'present' ]: + elif p['state'] == 'present': result = install(module, packages, cache, default_release=p['default_release'], install_recommends=install_recommends,force=force_yes, dpkg_options=dpkg_options) @@ -552,7 +558,7 @@ def main(): module.exit_json(**retvals) else: module.fail_json(**retvals) - elif p['state'] in [ 'removed', 'absent' ]: + elif p['state'] == 'absent': remove(module, packages, cache, p['purge'], dpkg_options) except apt.cache.LockFailedException: From a13c56831010ae5f08e418531f3fd45f1477f765 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 12 Nov 2014 17:16:02 -0500 Subject: [PATCH 070/250] corrected comparisson which had accidentally chaned in previous fixes --- packaging/os/apt.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/os/apt.py b/packaging/os/apt.py index e7d2ce4485c..ab08a06db63 100755 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -525,7 +525,7 @@ def main(): p['default_release'], dpkg_options) if p['deb']: - if p['state'] == 'present': + if p['state'] != 'present': module.fail_json(msg="deb only supports state=present") install_deb(module, p['deb'], cache, install_recommends=install_recommends, From 
08b2752080d4a80cab2b7c7ca9549e77d63c49ca Mon Sep 17 00:00:00 2001 From: John Batty Date: Thu, 13 Nov 2014 13:26:20 +0000 Subject: [PATCH 071/250] Fix get_flavor_id() when flavor_ram is specified Without this fix, _get_flavor_id() fails to find a matching flavor if both: * the flavor_ram parameter is specified * the first flavor in the list does not match. The bug is simply that the module.fail_json() call lies within the loop iterating through the flavors. This call should only be made if the loop completes and no matching flavors have been found. --- cloud/openstack/nova_compute.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/openstack/nova_compute.py b/cloud/openstack/nova_compute.py index 97488ea2e61..2b21ef86610 100644 --- a/cloud/openstack/nova_compute.py +++ b/cloud/openstack/nova_compute.py @@ -405,7 +405,7 @@ def _get_flavor_id(module, nova): if (flavor.ram >= module.params['flavor_ram'] and (not module.params['flavor_include'] or module.params['flavor_include'] in flavor.name)): return flavor.id - module.fail_json(msg = "Error finding flavor with %sMB of RAM" % module.params['flavor_ram']) + module.fail_json(msg = "Error finding flavor with %sMB of RAM" % module.params['flavor_ram']) return module.params['flavor_id'] From 1dd20197b1678f60477634e1b59fed82a82c397e Mon Sep 17 00:00:00 2001 From: sterutkb Date: Tue, 30 Sep 2014 11:20:59 +0200 Subject: [PATCH 072/250] Added support for deploying virtual machine from a virtual template --- cloud/vmware/vsphere_guest.py | 106 ++++++++++++++++++++++++++++++++-- 1 file changed, 101 insertions(+), 5 deletions(-) diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index 54f844310f8..abd8acfe755 100644 --- a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -67,7 +67,16 @@ options: description: - Indicate desired state of the vm. 
default: present - choices: ['present', 'powered_on', 'absent', 'powered_off', 'restarted', 'reconfigured'] + choices: ['present', 'powered_on', 'absent', 'powered_off', 'restarted', 'reconfigured'] + from_template: + description: + - Specifies if the VM should be deployed from a template (cannot be run with state) + default: no + choices: ['yes', 'no'] + template_src: + description: + - Name of the source template to deploy from + default: None vm_disk: description: - A key, value list of disks and their sizes and which datastore to keep it in. @@ -181,6 +190,18 @@ EXAMPLES = ''' datacenter: MyDatacenter hostname: esx001.mydomain.local +# Deploy a guest from a template +# No reconfiguration of the destination guest is done at this stage, a reconfigure would be needed to adjust memory/cpu etc.. +- vsphere_guest: + vcenter_hostname: vcenter.mydomain.local + username: myuser + password: mypass + guest: newvm001 + from_template: yes + template_src: centosTemplate + cluster: MainCluster + resource_pool: "/Resources" + # Task to gather facts from a vSphere cluster only if the system is a VMWare guest - vsphere_guest: @@ -192,12 +213,14 @@ EXAMPLES = ''' # Typical output of a vsphere_facts run on a guest +# If vmware tools is not installed, ip addresses will return None - hw_eth0: - addresstype: "assigned" label: "Network adapter 1" macaddress: "00:22:33:33:44:55" macaddress_dash: "00-22-33-33-44-55" + ipaddresses: ['192.0.2.100', '2001:DB8:56ff:feac:4d8a'] summary: "VM Network" hw_guest_full_name: "newvm001" hw_guest_id: "rhel6_64Guest" @@ -488,6 +511,49 @@ def vmdisk_id(vm, current_datastore_name): return id_list +def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, module, cluster_name): + vmTemplate = vsphere_client.get_vm_by_name(template_src) + vmTarget = None + + try: + cluster = [k for k, + v in vsphere_client.get_clusters().items() if v == cluster_name][0] + except IndexError, e: + vsphere_client.disconnect() + module.fail_json(msg="Cannot 
find Cluster named: %s" % + cluster_name) + + try: + rpmor = [k for k, v in vsphere_client.get_resource_pools( + from_mor=cluster).items() + if v == resource_pool][0] + except IndexError, e: + vsphere_client.disconnect() + module.fail_json(msg="Cannot find Resource Pool named: %s" % + resource_pool) + + try: + vmTarget = vsphere_client.get_vm_by_name(guest) + except Exception: + pass + if not vmTemplate.properties.config.template: + module.fail_json( + msg="Target %s is not a registered template" % template_src + ) + try: + if vmTarget: + changed = False + else: + vmTemplate.clone(guest, resourcepool=rpmor) + changed = True + vsphere_client.disconnect() + module.exit_json(changed=changed) + except Exception as e: + module.fail_json( + msg="Could not clone selected machine: %s" % e + ) + + def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool, cluster_name, guest, vm_extra_config, vm_hardware, vm_disk, vm_nic, state, force): spec = None changed = False @@ -618,7 +684,16 @@ def create_vm(vsphere_client, module, esxi, resource_pool, cluster_name, guest, hfmor = dcprops.hostFolder._obj # virtualmachineFolder managed object reference - vmfmor = dcprops.vmFolder._obj + if vm_extra_config['folder']: + if vm_extra_config['folder'] not in vsphere_client._get_managed_objects(MORTypes.Folder).values(): + vsphere_client.disconnect() + module.fail_json(msg="Cannot find folder named: %s" % vm_extra_config['folder']) + + for mor, name in vsphere_client._get_managed_objects(MORTypes.Folder).iteritems(): + if name == vm_extra_config['folder']: + vmfmor = mor + else: + vmfmor = dcprops.vmFolder._obj # networkFolder managed object reference nfmor = dcprops.networkFolder._obj @@ -936,6 +1011,11 @@ def gather_facts(vm): 'hw_processor_count': vm.properties.config.hardware.numCPU, 'hw_memtotal_mb': vm.properties.config.hardware.memoryMB, } + netInfo = vm.get_property('net') + netDict = {} + if netInfo: + for net in netInfo: + netDict[net['mac_address']] = net['ip_addresses'] 
ifidx = 0 for entry in vm.properties.config.hardware.device: @@ -948,6 +1028,7 @@ def gather_facts(vm): 'addresstype': entry.addressType, 'label': entry.deviceInfo.label, 'macaddress': entry.macAddress, + 'ipaddresses': netDict.get(entry.macAddress, None), 'macaddress_dash': entry.macAddress.replace(':', '-'), 'summary': entry.deviceInfo.summary, } @@ -1066,6 +1147,8 @@ def main(): ], default='present'), vmware_guest_facts=dict(required=False, choices=BOOLEANS), + from_template=dict(required=False, choices=BOOLEANS), + template_src=dict(required=False, type='str'), guest=dict(required=True, type='str'), vm_disk=dict(required=False, type='dict', default={}), vm_nic=dict(required=False, type='dict', default={}), @@ -1080,7 +1163,7 @@ def main(): ), supports_check_mode=False, - mutually_exclusive=[['state', 'vmware_guest_facts']], + mutually_exclusive=[['state', 'vmware_guest_facts'],['state', 'from_template']], required_together=[ ['state', 'force'], [ @@ -1090,7 +1173,8 @@ def main(): 'vm_hardware', 'esxi' ], - ['resource_pool', 'cluster'] + ['resource_pool', 'cluster'], + ['from_template', 'resource_pool', 'template_src'] ], ) @@ -1112,6 +1196,8 @@ def main(): esxi = module.params['esxi'] resource_pool = module.params['resource_pool'] cluster = module.params['cluster'] + template_src = module.params['template_src'] + from_template = module.params['from_template'] # CONNECT TO THE SERVER viserver = VIServer() @@ -1135,7 +1221,6 @@ def main(): except Exception, e: module.fail_json( msg="Fact gather failed with exception %s" % e) - # Power Changes elif state in ['powered_on', 'powered_off', 'restarted']: state_result = power_state(vm, state, force) @@ -1183,6 +1268,17 @@ def main(): module.fail_json( msg="No such VM %s. 
Fact gathering requires an existing vm" % guest) + + elif from_template: + deploy_template( + vsphere_client=viserver, + esxi=esxi, + resource_pool=resource_pool, + guest=guest, + template_src=template_src, + module=module, + cluster_name=cluster + ) if state in ['restarted', 'reconfigured']: module.fail_json( msg="No such VM %s. States [" From 6d79518d67bd5b87e36d71c97dca93c19b6782d6 Mon Sep 17 00:00:00 2001 From: Stewart Rutledge Date: Thu, 13 Nov 2014 22:01:26 +0100 Subject: [PATCH 073/250] Fixed a typo on power options --- cloud/vmware/vsphere_guest.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index abd8acfe755..95c937eaa55 100644 --- a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -67,7 +67,7 @@ options: description: - Indicate desired state of the vm. default: present - choices: ['present', 'powered_on', 'absent', 'powered_on', 'restarted', 'reconfigured'] + choices: ['present', 'powered_off', 'absent', 'powered_on', 'restarted', 'reconfigured'] from_template: description: - Specifies if the VM should be deployed from a template (cannot be ran with state) From 01db13e3adf63860bd5764f35f2668e3691ce272 Mon Sep 17 00:00:00 2001 From: Adam Miller Date: Thu, 13 Nov 2014 15:27:10 -0600 Subject: [PATCH 074/250] fix gce_net add firewall rule example --- cloud/google/gce_net.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/cloud/google/gce_net.py b/cloud/google/gce_net.py index c2c0b30452d..102a73f2bd1 100644 --- a/cloud/google/gce_net.py +++ b/cloud/google/gce_net.py @@ -35,7 +35,7 @@ options: description: - the protocol:ports to allow ('tcp:80' or 'tcp:80,443' or 'tcp:80-800') required: false - default: null + default: null aliases: [] ipv4_range: description: @@ -101,15 +101,16 @@ author: Eric Johnson EXAMPLES = ''' # Simple example of creating a new network -- local_action: +- local_action: module: gce_net name: privatenet 
ipv4_range: '10.240.16.0/24' - + # Simple example of creating a new firewall rule -- local_action: +- local_action: module: gce_net name: privatenet + fwname: all-web-webproxy allowed: tcp:80,8080 src_tags: ["web", "proxy"] From 4dd099fe7a27c6bc436d01ae7441c548cd67cfd6 Mon Sep 17 00:00:00 2001 From: Robert Estelle Date: Sat, 8 Nov 2014 23:20:06 -0500 Subject: [PATCH 075/250] Remove incorrect executable permissions. --- cloud/amazon/ec2.py | 0 cloud/amazon/ec2_asg.py | 0 cloud/amazon/ec2_lc.py | 0 cloud/amazon/ec2_scaling_policy.py | 0 cloud/google/gce.py | 0 packaging/os/apt.py | 0 packaging/os/apt_rpm.py | 0 system/hostname.py | 0 system/mount.py | 0 9 files changed, 0 insertions(+), 0 deletions(-) mode change 100755 => 100644 cloud/amazon/ec2.py mode change 100755 => 100644 cloud/amazon/ec2_asg.py mode change 100755 => 100644 cloud/amazon/ec2_lc.py mode change 100755 => 100644 cloud/amazon/ec2_scaling_policy.py mode change 100755 => 100644 cloud/google/gce.py mode change 100755 => 100644 packaging/os/apt.py mode change 100755 => 100644 packaging/os/apt_rpm.py mode change 100755 => 100644 system/hostname.py mode change 100755 => 100644 system/mount.py diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py old mode 100755 new mode 100644 diff --git a/cloud/amazon/ec2_asg.py b/cloud/amazon/ec2_asg.py old mode 100755 new mode 100644 diff --git a/cloud/amazon/ec2_lc.py b/cloud/amazon/ec2_lc.py old mode 100755 new mode 100644 diff --git a/cloud/amazon/ec2_scaling_policy.py b/cloud/amazon/ec2_scaling_policy.py old mode 100755 new mode 100644 diff --git a/cloud/google/gce.py b/cloud/google/gce.py old mode 100755 new mode 100644 diff --git a/packaging/os/apt.py b/packaging/os/apt.py old mode 100755 new mode 100644 diff --git a/packaging/os/apt_rpm.py b/packaging/os/apt_rpm.py old mode 100755 new mode 100644 diff --git a/system/hostname.py b/system/hostname.py old mode 100755 new mode 100644 diff --git a/system/mount.py b/system/mount.py old mode 100755 new mode 100644 
From 76fc436b0895b3f08f55bfb392da09b9beecaca5 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Wed, 29 Oct 2014 20:12:49 +0100 Subject: [PATCH 076/250] Allow foo=1.0* like expressions in apt --- packaging/os/apt.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/packaging/os/apt.py b/packaging/os/apt.py index ab08a06db63..def129caa00 100644 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -229,7 +229,8 @@ def expand_dpkg_options(dpkg_options_compressed): def expand_pkgspec_from_fnmatches(m, pkgspec, cache): new_pkgspec = [] - for pkgname_or_fnmatch_pattern in pkgspec: + for name_or_fnmatch_or_version in pkgspec: + pkgname_or_fnmatch_pattern = name_or_fnmatch_or_version.split("=")[0] # note that any of these chars is not allowed in a (debian) pkgname if [c for c in pkgname_or_fnmatch_pattern if c in "*?[]!"]: if "=" in pkgname_or_fnmatch_pattern: @@ -249,7 +250,7 @@ def expand_pkgspec_from_fnmatches(m, pkgspec, cache): else: new_pkgspec.extend(matches) else: - new_pkgspec.append(pkgname_or_fnmatch_pattern) + new_pkgspec.append(name_or_fnmatch_or_version) return new_pkgspec def install(m, pkgspec, cache, upgrade=False, default_release=None, From f475769d3a292494964a7c5d1a224281ab22be03 Mon Sep 17 00:00:00 2001 From: Michael Vogt Date: Wed, 29 Oct 2014 20:44:44 +0100 Subject: [PATCH 077/250] add apt unittest --- packaging/os/apt.py | 8 +++++--- tests/__init__.py | 0 tests/test_apt.py | 42 ++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 47 insertions(+), 3 deletions(-) create mode 100644 tests/__init__.py create mode 100644 tests/test_apt.py diff --git a/packaging/os/apt.py b/packaging/os/apt.py index def129caa00..459aaaa97a9 100644 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -230,10 +230,10 @@ def expand_dpkg_options(dpkg_options_compressed): def expand_pkgspec_from_fnmatches(m, pkgspec, cache): new_pkgspec = [] for name_or_fnmatch_or_version in pkgspec: - pkgname_or_fnmatch_pattern = 
name_or_fnmatch_or_version.split("=")[0] + pkgname_or_fnmatch_pattern, version = package_split(name_or_fnmatch_or_version) # note that any of these chars is not allowed in a (debian) pkgname if [c for c in pkgname_or_fnmatch_pattern if c in "*?[]!"]: - if "=" in pkgname_or_fnmatch_pattern: + if version: m.fail_json(msg="pkgname wildcard and version can not be mixed") # handle multiarch pkgnames, the idea is that "apt*" should # only select native packages. But "apt*:i386" should still work @@ -568,4 +568,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() +# FIXME: if __name__ == "__main__": ? +if "ANSIBLE_IN_HAPPY_UNITTEST_LAND" not in os.environ: + main() diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/test_apt.py b/tests/test_apt.py new file mode 100644 index 00000000000..10e3583801f --- /dev/null +++ b/tests/test_apt.py @@ -0,0 +1,42 @@ +import collections +import mock +import os +import unittest + +# FIXME: this is not super elegant +os.environ["ANSIBLE_IN_HAPPY_UNITTEST_LAND"] = "1" +from packaging.apt import ( + expand_pkgspec_from_fnmatches, +) + + +class AptExpandPkgspecTestCase(unittest.TestCase): + + def setUp(self): + FakePackage = collections.namedtuple("Package", ("name",)) + self.fake_cache = [ FakePackage("apt"), + FakePackage("apt-utils"), + ] + + def test_trivil(self): + foo = ["apt"] + self.assertEqual( + expand_pkgspec_from_fnmatches(None, foo, self.fake_cache), foo) + + def test_bug_28(self): + foo = ["apt=1.0*"] + self.assertEqual( + expand_pkgspec_from_fnmatches(None, foo, self.fake_cache), foo) + + def test_pkgname_wildcard_version_wildcard_fails(self): + foo = ["apt*=1.0*"] + m_mock = mock.Mock() + expand_pkgspec_from_fnmatches(m_mock, foo, self.fake_cache) + self.assertTrue(m_mock.fail_json.called) + + def test_pkgname_expands(self): + foo = ["apt*"] + m_mock = mock.Mock() + self.assertEqual( + 
expand_pkgspec_from_fnmatches(m_mock, foo, self.fake_cache), + ["apt", "apt-utils"]) From 885b60f31f5a87ff28882f0d5f7dd5e8f115db8f Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 13 Nov 2014 10:32:38 -0800 Subject: [PATCH 078/250] if __name__ does work with ansible modules --- packaging/os/apt.py | 3 +-- tests/test_apt.py | 2 -- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/packaging/os/apt.py b/packaging/os/apt.py index 459aaaa97a9..d732619b94e 100644 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -568,6 +568,5 @@ def main(): # import module snippets from ansible.module_utils.basic import * -# FIXME: if __name__ == "__main__": ? -if "ANSIBLE_IN_HAPPY_UNITTEST_LAND" not in os.environ: +if __name__ == "__main__": main() diff --git a/tests/test_apt.py b/tests/test_apt.py index 10e3583801f..c5802227759 100644 --- a/tests/test_apt.py +++ b/tests/test_apt.py @@ -3,8 +3,6 @@ import mock import os import unittest -# FIXME: this is not super elegant -os.environ["ANSIBLE_IN_HAPPY_UNITTEST_LAND"] = "1" from packaging.apt import ( expand_pkgspec_from_fnmatches, ) From 3e7ffc3210048b2ec5c10d8a0a4b1eb203aea4ca Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 13 Nov 2014 10:33:08 -0800 Subject: [PATCH 079/250] Correct trivial spelling. 
--- tests/test_apt.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_apt.py b/tests/test_apt.py index c5802227759..a7eb3d3feec 100644 --- a/tests/test_apt.py +++ b/tests/test_apt.py @@ -16,7 +16,7 @@ class AptExpandPkgspecTestCase(unittest.TestCase): FakePackage("apt-utils"), ] - def test_trivil(self): + def test_trivial(self): foo = ["apt"] self.assertEqual( expand_pkgspec_from_fnmatches(None, foo, self.fake_cache), foo) From 10fd0f7073959d7fc166a1b3ae58f9c634ecf936 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 13 Nov 2014 10:53:25 -0800 Subject: [PATCH 080/250] Just make things a little more readable --- packaging/os/apt.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/packaging/os/apt.py b/packaging/os/apt.py index d732619b94e..5e02016ecac 100644 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -167,7 +167,7 @@ except ImportError: HAS_PYTHON_APT = False def package_split(pkgspec): - parts = pkgspec.split('=') + parts = pkgspec.split('=', 1) if len(parts) > 1: return parts[0], parts[1] else: @@ -229,28 +229,28 @@ def expand_dpkg_options(dpkg_options_compressed): def expand_pkgspec_from_fnmatches(m, pkgspec, cache): new_pkgspec = [] - for name_or_fnmatch_or_version in pkgspec: - pkgname_or_fnmatch_pattern, version = package_split(name_or_fnmatch_or_version) + for pkgspec_pattern in pkgspec: + pkgname_pattern, version = package_split(pkgspec_pattern) # note that any of these chars is not allowed in a (debian) pkgname - if [c for c in pkgname_or_fnmatch_pattern if c in "*?[]!"]: + if frozenset('*?[]!').intersection(pkgname_pattern): if version: m.fail_json(msg="pkgname wildcard and version can not be mixed") # handle multiarch pkgnames, the idea is that "apt*" should # only select native packages. 
But "apt*:i386" should still work - if not ":" in pkgname_or_fnmatch_pattern: + if not ":" in pkgname_pattern: matches = fnmatch.filter( [pkg.name for pkg in cache - if not ":" in pkg.name], pkgname_or_fnmatch_pattern) + if not ":" in pkg.name], pkgname_pattern) else: matches = fnmatch.filter( - [pkg.name for pkg in cache], pkgname_or_fnmatch_pattern) + [pkg.name for pkg in cache], pkgname_pattern) if len(matches) == 0: - m.fail_json(msg="No package(s) matching '%s' available" % str(pkgname_or_fnmatch_pattern)) + m.fail_json(msg="No package(s) matching '%s' available" % str(pkgname_pattern)) else: new_pkgspec.extend(matches) else: - new_pkgspec.append(name_or_fnmatch_or_version) + new_pkgspec.append(pkgspec_pattern) return new_pkgspec def install(m, pkgspec, cache, upgrade=False, default_release=None, From bc18c9dc782969fecd0d1d5bf71eeadddd4da1b2 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 13 Nov 2014 11:20:37 -0800 Subject: [PATCH 081/250] Cache pkg name list so we don't recreate the list for every package --- packaging/os/apt.py | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/packaging/os/apt.py b/packaging/os/apt.py index 5e02016ecac..7940317eb78 100644 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -235,21 +235,27 @@ def expand_pkgspec_from_fnmatches(m, pkgspec, cache): if frozenset('*?[]!').intersection(pkgname_pattern): if version: m.fail_json(msg="pkgname wildcard and version can not be mixed") + # handle multiarch pkgnames, the idea is that "apt*" should # only select native packages. 
But "apt*:i386" should still work if not ":" in pkgname_pattern: - matches = fnmatch.filter( - [pkg.name for pkg in cache - if not ":" in pkg.name], pkgname_pattern) + try: + pkg_name_cache = _non_multiarch + except NameError: + pkg_name_cache = _non_multiarch = [pkg.name for pkg in cache if not ':' in pkg.name] else: - matches = fnmatch.filter( - [pkg.name for pkg in cache], pkgname_pattern) + try: + pkg_name_cache = _all_pkg_names + except NameError: + pkg_name_cache = _all_pkg_names = [pkg.name for pkg in cache] + matches = fnmatch.filter(pkg_name_cache, pkgname_pattern) if len(matches) == 0: m.fail_json(msg="No package(s) matching '%s' available" % str(pkgname_pattern)) else: new_pkgspec.extend(matches) else: + # No wildcards in name new_pkgspec.append(pkgspec_pattern) return new_pkgspec From ccedf0dd4027843a7ba78dacfb30954404ffdd8c Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 13 Nov 2014 11:28:50 -0800 Subject: [PATCH 082/250] Decide to allow pkgname and version wildcards so that things like libxml2*=2.9* would work --- packaging/os/apt.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/packaging/os/apt.py b/packaging/os/apt.py index 7940317eb78..17ce8aa4f78 100644 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -231,11 +231,9 @@ def expand_pkgspec_from_fnmatches(m, pkgspec, cache): new_pkgspec = [] for pkgspec_pattern in pkgspec: pkgname_pattern, version = package_split(pkgspec_pattern) - # note that any of these chars is not allowed in a (debian) pkgname - if frozenset('*?[]!').intersection(pkgname_pattern): - if version: - m.fail_json(msg="pkgname wildcard and version can not be mixed") + # note that none of these chars is allowed in a (debian) pkgname + if frozenset('*?[]!').intersection(pkgname_pattern): # handle multiarch pkgnames, the idea is that "apt*" should # only select native packages. 
But "apt*:i386" should still work if not ":" in pkgname_pattern: From e1ecc5ca8e8b991da2b5f683ca23e6997c993b62 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 13 Nov 2014 16:24:21 -0800 Subject: [PATCH 083/250] Update package_status and install to account for wildcarded versions. --- packaging/os/apt.py | 38 ++++++++++++++++++++++++++++++++------ 1 file changed, 32 insertions(+), 6 deletions(-) diff --git a/packaging/os/apt.py b/packaging/os/apt.py index 17ce8aa4f78..f1c5f0c23fe 100644 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -205,19 +205,34 @@ def package_status(m, pkgname, version, cache, state): # assume older version of python-apt is installed package_is_installed = pkg.isInstalled - if version and package_is_installed: + if version: try: installed_version = pkg.installed.version except AttributeError: installed_version = pkg.installedVersion - return package_is_installed and fnmatch.fnmatch(installed_version, version), False, has_files + + avail_upgrades = fnmatch.filter((p.version for p in pkg.versions), version) + + if package_is_installed: + # Only claim the package is installed if the version is matched as well + package_is_installed = fnmatch.fnmatch(installed_version, version) + + # Only claim the package is upgradable if a candidate matches the version + package_is_upgradable = False + for candidate in avail_upgrades: + if pkg.versions[candidate] > p.installed: + package_is_upgradable = True + break + else: + package_is_upgradable = bool(avail_upgrades) else: try: package_is_upgradable = pkg.is_upgradable except AttributeError: # assume older version of python-apt is installed package_is_upgradable = pkg.isUpgradable - return package_is_installed, package_is_upgradable, has_files + + return package_is_installed, package_is_upgradable, has_files def expand_dpkg_options(dpkg_options_compressed): options_list = dpkg_options_compressed.split(',') @@ -260,13 +275,23 @@ def expand_pkgspec_from_fnmatches(m, pkgspec, cache): def 
install(m, pkgspec, cache, upgrade=False, default_release=None, install_recommends=True, force=False, dpkg_options=expand_dpkg_options(DPKG_OPTIONS)): + pkg_list = [] packages = "" pkgspec = expand_pkgspec_from_fnmatches(m, pkgspec, cache) for package in pkgspec: name, version = package_split(package) installed, upgradable, has_files = package_status(m, name, version, cache, state='install') if not installed or (upgrade and upgradable): - packages += "'%s' " % package + pkg_list.append("'%s'" % package) + if installed and upgradable and version: + # This happens when the package is installed, a newer version is + # available, and the version is a wildcard that matches both + # + # We do not apply the upgrade flag because we cannot specify both + # a version and state=latest. (This behaviour mirrors how apt + # treats a version with wildcard in the package) + pkg_list.append("'%s'" % package) + packages = ' '.join(pkg_list) if len(packages) != 0: if force: @@ -355,13 +380,14 @@ def install_deb(m, debs, cache, force, install_recommends, dpkg_options): def remove(m, pkgspec, cache, purge=False, dpkg_options=expand_dpkg_options(DPKG_OPTIONS)): - packages = "" + pkg_list = [] pkgspec = expand_pkgspec_from_fnmatches(m, pkgspec, cache) for package in pkgspec: name, version = package_split(package) installed, upgradable, has_files = package_status(m, name, version, cache, state='remove') if installed or (has_files and purge): - packages += "'%s' " % package + pkg_list.append("'%s'" % package) + packages = ' '.join(pkg_list) if len(packages) == 0: m.exit_json(changed=False) From 0c312e417a2d8e4eb90482df3618bbb935507a6c Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 13 Nov 2014 17:14:38 -0800 Subject: [PATCH 084/250] Moving unittests to the main ansible repo for now --- tests/__init__.py | 0 tests/test_apt.py | 40 ---------------------------------------- 2 files changed, 40 deletions(-) delete mode 100644 tests/__init__.py delete mode 100644 tests/test_apt.py diff 
--git a/tests/__init__.py b/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/tests/test_apt.py b/tests/test_apt.py deleted file mode 100644 index a7eb3d3feec..00000000000 --- a/tests/test_apt.py +++ /dev/null @@ -1,40 +0,0 @@ -import collections -import mock -import os -import unittest - -from packaging.apt import ( - expand_pkgspec_from_fnmatches, -) - - -class AptExpandPkgspecTestCase(unittest.TestCase): - - def setUp(self): - FakePackage = collections.namedtuple("Package", ("name",)) - self.fake_cache = [ FakePackage("apt"), - FakePackage("apt-utils"), - ] - - def test_trivial(self): - foo = ["apt"] - self.assertEqual( - expand_pkgspec_from_fnmatches(None, foo, self.fake_cache), foo) - - def test_bug_28(self): - foo = ["apt=1.0*"] - self.assertEqual( - expand_pkgspec_from_fnmatches(None, foo, self.fake_cache), foo) - - def test_pkgname_wildcard_version_wildcard_fails(self): - foo = ["apt*=1.0*"] - m_mock = mock.Mock() - expand_pkgspec_from_fnmatches(m_mock, foo, self.fake_cache) - self.assertTrue(m_mock.fail_json.called) - - def test_pkgname_expands(self): - foo = ["apt*"] - m_mock = mock.Mock() - self.assertEqual( - expand_pkgspec_from_fnmatches(m_mock, foo, self.fake_cache), - ["apt", "apt-utils"]) From c6522620c562d24031ad32187de83c3768df3c77 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 14 Nov 2014 08:46:32 -0800 Subject: [PATCH 085/250] Clean up the docker module just a little so that we can unittest the get_split_image_tag function --- cloud/docker/docker.py | 55 ++++++++++++++++++++++-------------------- 1 file changed, 29 insertions(+), 26 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index a0a52ffc756..5763e346779 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -336,10 +336,11 @@ try: except ImportError, e: HAS_DOCKER_PY = False -try: - from docker.errors import APIError as DockerAPIError -except ImportError: - from docker.client import APIError as 
DockerAPIError +if HAS_DOCKER_PY: + try: + from docker.errors import APIError as DockerAPIError + except ImportError: + from docker.client import APIError as DockerAPIError def _human_to_bytes(number): @@ -369,6 +370,25 @@ def _docker_id_quirk(inspect): del inspect['ID'] return inspect + +def get_split_image_tag(image): + # If image contains a host or org name, omit that from our check + if '/' in image: + registry, resource = image.rsplit('/', 1) + else: + registry, resource = None, image + + # now we can determine if image has a tag + if ':' in resource: + resource, tag = resource.split(':', 1) + if registry: + resource = '/'.join((registry, resource)) + else: + tag = "latest" + resource = image + + return resource, tag + class DockerManager: counters = {'created':0, 'started':0, 'stopped':0, 'killed':0, 'removed':0, 'restarted':0, 'pull':0} @@ -505,24 +525,6 @@ class DockerManager: return binds - def get_split_image_tag(self, image): - # If image contains a host or org name, omit that from our check - if '/' in image: - registry, resource = image.rsplit('/', 1) - else: - registry, resource = None, image - - # now we can determine if image has a tag - if ':' in resource: - resource, tag = resource.split(':', 1) - if registry: - resource = '/'.join((registry, resource)) - else: - tag = "latest" - resource = image - - return resource, tag - def get_summary_counters_msg(self): msg = "" for k, v in self.counters.iteritems(): @@ -562,10 +564,10 @@ class DockerManager: # if we weren't given a tag with the image, we need to only compare on the image name, as that # docker will give us back the full image name including a tag in the container list if one exists. 
- image, tag = self.get_split_image_tag(image) + image, tag = get_split_image_tag(image) for i in self.client.containers(all=True): - running_image, running_tag = self.get_split_image_tag(i['Image']) + running_image, running_tag = get_split_image_tag(i['Image']) running_command = i['Command'].strip() name_matches = False @@ -623,7 +625,7 @@ class DockerManager: containers = do_create(count, params) except: resource = self.module.params.get('image') - image, tag = self.get_split_image_tag(resource) + image, tag = get_split_image_tag(resource) if self.module.params.get('username'): try: self.client.login( @@ -851,4 +853,5 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() From c389cd671f3712a3b330e302dc10ea6e5e7b5605 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 14 Nov 2014 09:43:38 -0800 Subject: [PATCH 086/250] Documentation update for apt version wildcards from bryanlarsen --- packaging/os/apt.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/os/apt.py b/packaging/os/apt.py index f1c5f0c23fe..1bb52da4c8d 100644 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -29,7 +29,7 @@ version_added: "0.0.2" options: name: description: - - A package name, like C(foo), or package specifier with version, like C(foo=1.0). Wildcards (fnmatch) like apt* are also supported. + - A package name, like C(foo), or package specifier with version, like C(foo=1.0). Name wildcards (fnmatch) like C(apt*) and version wildcards like C(foo=1.0*) are also supported. 
required: false default: null state: From fb4854ebcbc35b3038530de91a472ef7d0b7b710 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 14 Nov 2014 10:01:30 -0800 Subject: [PATCH 087/250] Fix retrieval of package version in apt module --- packaging/os/apt.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/packaging/os/apt.py b/packaging/os/apt.py index 1bb52da4c8d..5c557900b76 100644 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -206,14 +206,14 @@ def package_status(m, pkgname, version, cache, state): package_is_installed = pkg.isInstalled if version: - try: - installed_version = pkg.installed.version - except AttributeError: - installed_version = pkg.installedVersion - avail_upgrades = fnmatch.filter((p.version for p in pkg.versions), version) if package_is_installed: + try: + installed_version = pkg.installed.version + except AttributeError: + installed_version = pkg.installedVersion + # Only claim the package is installed if the version is matched as well package_is_installed = fnmatch.fnmatch(installed_version, version) From ce02d596510fdac470b3d89007e79a90b921da80 Mon Sep 17 00:00:00 2001 From: Henry Finucane Date: Fri, 14 Nov 2014 16:27:20 -0800 Subject: [PATCH 088/250] If force=true, ignore python-apt's advice This lets you downgrade packages, for instance. 
--- packaging/os/apt.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/os/apt.py b/packaging/os/apt.py index 5c557900b76..c18661d19df 100644 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -333,7 +333,7 @@ def install_deb(m, debs, cache, force, install_recommends, dpkg_options): if pkg.compare_to_version_in_cache() == pkg.VERSION_SAME: continue # Check if package is installable - if not pkg.check(): + if not pkg.check() and not force: m.fail_json(msg=pkg._failure_string) # add any missing deps to the list of deps we need From 6f6eca5b4e91b08b22b6e3e74db95723ac10f1a7 Mon Sep 17 00:00:00 2001 From: Nejc Zupan Date: Sat, 15 Nov 2014 16:12:46 -0500 Subject: [PATCH 089/250] digital ocean: Remove unsupported states --- cloud/digital_ocean/digital_ocean_domain.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cloud/digital_ocean/digital_ocean_domain.py b/cloud/digital_ocean/digital_ocean_domain.py index d0615ad0df0..88b4af1c5e4 100644 --- a/cloud/digital_ocean/digital_ocean_domain.py +++ b/cloud/digital_ocean/digital_ocean_domain.py @@ -27,7 +27,7 @@ options: description: - Indicate desired state of the target. default: present - choices: ['present', 'active', 'absent', 'deleted'] + choices: ['present', 'absent'] client_id: description: - DigitalOcean manager id. 
@@ -181,7 +181,7 @@ def core(module): if not domain: domain = Domain.find(name=getkeyordie("name")) - + if not domain: domain = Domain.add(getkeyordie("name"), getkeyordie("ip")) @@ -217,7 +217,7 @@ def core(module): def main(): module = AnsibleModule( argument_spec = dict( - state = dict(choices=['active', 'present', 'absent', 'deleted'], default='present'), + state = dict(choices=['present', 'absent'], default='present'), client_id = dict(aliases=['CLIENT_ID'], no_log=True), api_key = dict(aliases=['API_KEY'], no_log=True), name = dict(type='str'), From 7b875dbda58e5ca8cd28c1d3c253d21cf2824b25 Mon Sep 17 00:00:00 2001 From: Sam Kottler Date: Sat, 15 Nov 2014 16:21:07 -0500 Subject: [PATCH 090/250] Remove trailing whitespace in digital_ocean_domain --- cloud/digital_ocean/digital_ocean_domain.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cloud/digital_ocean/digital_ocean_domain.py b/cloud/digital_ocean/digital_ocean_domain.py index 88b4af1c5e4..1086a6bab16 100644 --- a/cloud/digital_ocean/digital_ocean_domain.py +++ b/cloud/digital_ocean/digital_ocean_domain.py @@ -145,7 +145,7 @@ class Domain(JsonfyMixIn): return False domains = Domain.list_all() - + if id is not None: for domain in domains: if domain.id == id: @@ -203,10 +203,10 @@ def core(module): domain = None if "id" in module.params: domain = Domain.find(id=module.params["id"]) - + if not domain and "name" in module.params: domain = Domain.find(name=module.params["name"]) - + if not domain: module.exit_json(changed=False, msg="Domain not found.") From 5a628aacd4ea6df646fc40f71a677a52e5f343e5 Mon Sep 17 00:00:00 2001 From: IndyMichaelB Date: Fri, 31 Oct 2014 16:13:41 -0400 Subject: [PATCH 091/250] docfix for vsphere_guest.py Corrected parameter name from user to username in documentation --- cloud/vmware/vsphere_guest.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index 54f844310f8..cbdc0bb5661 
100644 --- a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -38,7 +38,7 @@ options: description: - The virtual server name you wish to manage. required: true - user: + username: description: - Username to connect to vcenter as. required: true From 8b8008569fd6a9c2ec4603dedbe27cbd000bdb36 Mon Sep 17 00:00:00 2001 From: Jonathan Mainguy Date: Mon, 17 Nov 2014 10:31:54 -0500 Subject: [PATCH 092/250] fixes user module for rhel5 by using -n instead of -N, all other distros will still use -N --- system/user.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/system/user.py b/system/user.py index 551384a7a67..6fe20122e91 100644 --- a/system/user.py +++ b/system/user.py @@ -299,7 +299,15 @@ class User(object): # exists with the same name as the user to prevent # errors from useradd trying to create a group when # USERGROUPS_ENAB is set in /etc/login.defs. - cmd.append('-N') + if os.path.exists('/etc/redhat-release'): + dist = platform.dist() + major_release = int(dist[1].split('.')[0]) + if major_release <= 5: + cmd.append('-n') + else: + cmd.append('-N') + else: + cmd.append('-N') if self.groups is not None and len(self.groups): groups = self.get_groups_set() From f49120598f31e280b5b26bd6c774adf7520063f0 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 17 Nov 2014 15:03:49 -0500 Subject: [PATCH 093/250] service now detects systemd is actually running, not just installed --- system/service.py | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/system/service.py b/system/service.py index 6093717bcee..61915709703 100644 --- a/system/service.py +++ b/system/service.py @@ -394,8 +394,24 @@ class LinuxService(Service): location[binary] = self.module.get_bin_path(binary) def check_systemd(name): - # verify service is managed by systemd - if not location.get('systemctl', None): + # verify systemd is installed (by finding systemctl) + if not location.get('systemctl', False): + return False + + 
systemd_enabled = False + # Check if init is the systemd command, using comm as cmdline could be symlink + try: + f = open('/proc/1/comm', 'r') + except IOError, err: + # If comm doesn't exist, old kernel, no systemd + return False + + for line in f: + if 'systemd' in line: + systemd_enabled = True + break + + if not systemd_enabled: return False # default to .service if the unit type is not specified From 29d211e7ab74300e9f96bced8e14fcb038eb7b43 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 17 Nov 2014 22:32:16 -0500 Subject: [PATCH 094/250] service cleanup --- system/service.py | 96 +++++++++++++++++++++-------------------------- 1 file changed, 43 insertions(+), 53 deletions(-) diff --git a/system/service.py b/system/service.py index 61915709703..13ee58c9538 100644 --- a/system/service.py +++ b/system/service.py @@ -393,7 +393,12 @@ class LinuxService(Service): for binary in binaries: location[binary] = self.module.get_bin_path(binary) - def check_systemd(name): + for initdir in initpaths: + initscript = "%s/%s" % (initdir,self.name) + if os.path.isfile(initscript): + self.svc_initscript = initscript + + def check_systemd(name, initscript): # verify systemd is installed (by finding systemctl) if not location.get('systemctl', False): return False @@ -437,46 +442,25 @@ class LinuxService(Service): if line.startswith(template_name): self.__systemd_unit = name return True + + # systemd also handles init scripts (and is enabled at this point) + if initscript: + return True + return False - # Locate a tool for enable options - if location.get('chkconfig', None) and os.path.exists("/etc/init.d/%s" % self.name): - if check_systemd(self.name): - # service is managed by systemd - self.enable_cmd = location['systemctl'] - else: - # we are using a standard SysV service - self.enable_cmd = location['chkconfig'] - elif location.get('update-rc.d', None): - if check_systemd(self.name): - # service is managed by systemd - self.enable_cmd = location['systemctl'] - elif 
location['initctl'] and os.path.exists("/etc/init/%s.conf" % self.name): - # service is managed by upstart - self.enable_cmd = location['initctl'] - elif location['update-rc.d'] and os.path.exists("/etc/init.d/%s" % self.name): - # service is managed by with SysV init scripts, but with update-rc.d - self.enable_cmd = location['update-rc.d'] - else: - self.module.fail_json(msg="service not found: %s" % self.name) - elif location.get('rc-service', None) and not location.get('systemctl', None): - # service is managed by OpenRC - self.svc_cmd = location['rc-service'] - self.enable_cmd = location['rc-update'] - return - elif check_systemd(self.name): + # Locate a tool to enable/disable a service + if check_systemd(self.name, self.svc_initscript): # service is managed by systemd self.enable_cmd = location['systemctl'] + self.svc_cmd = location['systemctl'] + elif location['initctl'] and os.path.exists("/etc/init/%s.conf" % self.name): # service is managed by upstart self.enable_cmd = location['initctl'] - - # if this service is managed via upstart, get the current upstart version - if self.enable_cmd == location['initctl']: - # default the upstart version to something we can compare against + # set the upstart version based on the output of 'initctl version' self.upstart_version = LooseVersion('0.0.0') try: - # set the upstart version based on the output of 'initctl version' version_re = re.compile(r'\(upstart (.*)\)') rc,stdout,stderr = self.module.run_command('initctl version') if rc == 0: @@ -484,33 +468,39 @@ class LinuxService(Service): if res: self.upstart_version = LooseVersion(res.groups()[0]) except: - # we'll use the default of 0.0.0 since we couldn't - # detect the current upstart version above - pass + pass # we'll use the default of 0.0.0 - # Locate a tool for runtime service management (start, stop etc.) 
- if location.get('service', None) and os.path.exists("/etc/init.d/%s" % self.name): - # SysV init script - self.svc_cmd = location['service'] - elif location.get('start', None) and os.path.exists("/etc/init/%s.conf" % self.name): - # upstart -- rather than being managed by one command, start/stop/restart are actual commands - self.svc_cmd = '' - else: - # still a SysV init script, but /sbin/service isn't installed - for initdir in initpaths: - initscript = "%s/%s" % (initdir,self.name) - if os.path.isfile(initscript): - self.svc_initscript = initscript + if location.get('start', False): + # upstart -- rather than being managed by one command, start/stop/restart are actual commands + self.svc_cmd = '' + + elif location.get('rc-service', False): + # service is managed by OpenRC + self.svc_cmd = location['rc-service'] + self.enable_cmd = location['rc-update'] + return # already have service start/stop tool too! - # couldn't find anything yet, assume systemd - if self.svc_cmd is None and self.svc_initscript is None: - if location.get('systemctl'): - self.svc_cmd = location['systemctl'] + elif self.svc_initscript: + # service is managed by with SysV init scripts + if location.get('update-rc.d', False): + # and uses update-rc.d + self.enable_cmd = location['update-rc.d'] + elif location.get('chkconfig', False): + # and uses chkconfig + self.enable_cmd = location['chkconfig'] + + if self.enable_cmd is None: + self.module.fail_json(msg="no service or tool found for: %s" % self.name) + + # If no service control tool selected yet, try to see if 'service' is available + if not self.svc_cmd and location.get('service', False): + self.svc_cmd = location['service'] + # couldn't find anything yet if self.svc_cmd is None and not self.svc_initscript: self.module.fail_json(msg='cannot find \'service\' binary or init script for service, possible typo in service name?, aborting') - if location.get('initctl', None): + if location.get('initctl', False): self.svc_initctl = 
location['initctl'] def get_systemd_status_dict(self): From 383ecdb50bf3c906b83040f436b81570798a98f3 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 18 Nov 2014 13:08:26 -0800 Subject: [PATCH 095/250] Correct variable name --- packaging/os/apt.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/os/apt.py b/packaging/os/apt.py index 5c557900b76..b175b6ae317 100644 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -220,7 +220,7 @@ def package_status(m, pkgname, version, cache, state): # Only claim the package is upgradable if a candidate matches the version package_is_upgradable = False for candidate in avail_upgrades: - if pkg.versions[candidate] > p.installed: + if pkg.versions[candidate] > pkg.installed: package_is_upgradable = True break else: From c46f39f0442ecaaa5eec60d8d895ee80ff7ba656 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 19 Nov 2014 12:30:24 -0800 Subject: [PATCH 096/250] Fix git module checking out correct version after initial clone Fixes #313 --- source_control/git.py | 1 + 1 file changed, 1 insertion(+) diff --git a/source_control/git.py b/source_control/git.py index 998e684afb1..17378fc97b6 100644 --- a/source_control/git.py +++ b/source_control/git.py @@ -646,6 +646,7 @@ def main(): module.exit_json(changed=True, before=before, after=remote_head) # there's no git config, so clone clone(git_path, module, repo, dest, remote, depth, version, bare, reference, recursive) + repo_updated = True elif not update: # Just return having found a repo already in the dest path # this does no checking that the repo is the actual repo From 19b328c4df2157b6c0191e9144236643ce2be890 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 19 Nov 2014 13:04:44 -0800 Subject: [PATCH 097/250] Cannot readlink() on a hard link --- files/file.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/file.py b/files/file.py index be7b05aaf52..7aa5e45d7bc 100644 --- a/files/file.py +++ b/files/file.py 
@@ -170,7 +170,7 @@ def main(): src = os.path.expanduser(src) else: if state in ['link','hard']: - if follow: + if follow and state == 'link': # use the current target of the link as the source src = os.readlink(path) else: From 30c7f9a7d377cd225f2e0e43b0990839867d561b Mon Sep 17 00:00:00 2001 From: Will Thames Date: Sat, 1 Nov 2014 12:44:44 +1000 Subject: [PATCH 098/250] Added better region handling and enabled eu-central-1 Make use of improved connect_to_aws that throws an exception if a region can't be connected to (e.g. eu-central-1 requires boto 2.34 onwards) Add eu-central-1 to the two modules that hardcode their regions Add us-gov-west-1 to ec2_ami_search to match documentation! This pull request makes use of the changes in ansible/ansible#9419 --- cloud/amazon/ec2_ami_search.py | 7 +++++-- cloud/amazon/ec2_asg.py | 2 +- cloud/amazon/ec2_elb.py | 4 ++-- cloud/amazon/ec2_elb_lb.py | 2 +- cloud/amazon/ec2_facts.py | 1 + cloud/amazon/ec2_lc.py | 2 +- cloud/amazon/ec2_metric_alarm.py | 2 +- cloud/amazon/ec2_scaling_policy.py | 4 +--- 8 files changed, 13 insertions(+), 11 deletions(-) diff --git a/cloud/amazon/ec2_ami_search.py b/cloud/amazon/ec2_ami_search.py index 25875de39bd..36a0ab38f22 100644 --- a/cloud/amazon/ec2_ami_search.py +++ b/cloud/amazon/ec2_ami_search.py @@ -56,7 +56,8 @@ options: required: false default: us-east-1 choices: ["ap-northeast-1", "ap-southeast-1", "ap-southeast-2", - "eu-west-1", "sa-east-1", "us-east-1", "us-west-1", "us-west-2", "us-gov-west-1"] + "eu-central-1", "eu-west-1", "sa-east-1", "us-east-1", + "us-west-1", "us-west-2", "us-gov-west-1"] virt: description: virutalization type required: false @@ -88,11 +89,13 @@ SUPPORTED_DISTROS = ['ubuntu'] AWS_REGIONS = ['ap-northeast-1', 'ap-southeast-1', 'ap-southeast-2', + 'eu-central-1', 'eu-west-1', 'sa-east-1', 'us-east-1', 'us-west-1', - 'us-west-2'] + 'us-west-2', + "us-gov-west-1"] def get_url(module, url): diff --git a/cloud/amazon/ec2_asg.py b/cloud/amazon/ec2_asg.py index 
2b060ccca37..8f08aaf874e 100644 --- a/cloud/amazon/ec2_asg.py +++ b/cloud/amazon/ec2_asg.py @@ -272,7 +272,7 @@ def create_autoscaling_group(connection, module): region, ec2_url, aws_connect_params = get_aws_connection_info(module) try: ec2_connection = connect_to_aws(boto.ec2, region, **aws_connect_params) - except boto.exception.NoAuthHandlerFound, e: + except (boto.exception.NoAuthHandlerFound, StandardError), e: module.fail_json(msg=str(e)) asg_tags = [] diff --git a/cloud/amazon/ec2_elb.py b/cloud/amazon/ec2_elb.py index 42cb1819025..41883de15ce 100644 --- a/cloud/amazon/ec2_elb.py +++ b/cloud/amazon/ec2_elb.py @@ -258,7 +258,7 @@ class ElbManager: try: elb = connect_to_aws(boto.ec2.elb, self.region, **self.aws_connect_params) - except boto.exception.NoAuthHandlerFound, e: + except (boto.exception.NoAuthHandlerFound, StandardError), e: self.module.fail_json(msg=str(e)) elbs = elb.get_all_load_balancers() @@ -278,7 +278,7 @@ class ElbManager: try: ec2 = connect_to_aws(boto.ec2, self.region, **self.aws_connect_params) - except boto.exception.NoAuthHandlerFound, e: + except (boto.exception.NoAuthHandlerFound, StandardError), e: self.module.fail_json(msg=str(e)) return ec2.get_only_instances(instance_ids=[self.instance_id])[0] diff --git a/cloud/amazon/ec2_elb_lb.py b/cloud/amazon/ec2_elb_lb.py index 462fbbcc797..4717e767600 100644 --- a/cloud/amazon/ec2_elb_lb.py +++ b/cloud/amazon/ec2_elb_lb.py @@ -374,7 +374,7 @@ class ElbManager(object): try: return connect_to_aws(boto.ec2.elb, self.region, **self.aws_connect_params) - except boto.exception.NoAuthHandlerFound, e: + except (boto.exception.NoAuthHandlerFound, StandardError), e: self.module.fail_json(msg=str(e)) def _delete_elb(self): diff --git a/cloud/amazon/ec2_facts.py b/cloud/amazon/ec2_facts.py index 7b5c610dc2d..c6fbf86b724 100644 --- a/cloud/amazon/ec2_facts.py +++ b/cloud/amazon/ec2_facts.py @@ -65,6 +65,7 @@ class Ec2Metadata(object): AWS_REGIONS = ('ap-northeast-1', 'ap-southeast-1', 'ap-southeast-2', 
+ 'eu-central-1', 'eu-west-1', 'sa-east-1', 'us-east-1', diff --git a/cloud/amazon/ec2_lc.py b/cloud/amazon/ec2_lc.py index f75dfe6d938..c4b7f70b924 100644 --- a/cloud/amazon/ec2_lc.py +++ b/cloud/amazon/ec2_lc.py @@ -265,7 +265,7 @@ def main(): try: connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params) - except boto.exception.NoAuthHandlerFound, e: + except (boto.exception.NoAuthHandlerFound, StandardError), e: module.fail_json(msg=str(e)) state = module.params.get('state') diff --git a/cloud/amazon/ec2_metric_alarm.py b/cloud/amazon/ec2_metric_alarm.py index 519f88f24f8..7a8d573ce74 100644 --- a/cloud/amazon/ec2_metric_alarm.py +++ b/cloud/amazon/ec2_metric_alarm.py @@ -271,7 +271,7 @@ def main(): region, ec2_url, aws_connect_params = get_aws_connection_info(module) try: connection = connect_to_aws(boto.ec2.cloudwatch, region, **aws_connect_params) - except boto.exception.NoAuthHandlerFound, e: + except (boto.exception.NoAuthHandlerFound, StandardError), e: module.fail_json(msg=str(e)) if state == 'present': diff --git a/cloud/amazon/ec2_scaling_policy.py b/cloud/amazon/ec2_scaling_policy.py index ad1fa7ce7f1..8e7d459e3e3 100644 --- a/cloud/amazon/ec2_scaling_policy.py +++ b/cloud/amazon/ec2_scaling_policy.py @@ -163,9 +163,7 @@ def main(): try: connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params) - if not connection: - module.fail_json(msg="failed to connect to AWS for the given region: %s" % str(region)) - except boto.exception.NoAuthHandlerFound, e: + except (boto.exception.NoAuthHandlerFound, StandardError), e: module.fail_json(msg = str(e)) if state == 'present': From 07faee61deecfac38521dbd20c7aae9762bbdf29 Mon Sep 17 00:00:00 2001 From: Chris Church Date: Mon, 24 Nov 2014 00:43:55 -0500 Subject: [PATCH 099/250] Minor win_user fixes based on feedback. 
--- windows/win_user.ps1 | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/windows/win_user.ps1 b/windows/win_user.ps1 index a805fac7f25..ae4847a8528 100644 --- a/windows/win_user.ps1 +++ b/windows/win_user.ps1 @@ -150,8 +150,9 @@ If ($state -eq 'present') { } ElseIf (($password -ne $null) -and ($update_password -eq 'always')) { [void][system.reflection.assembly]::LoadWithPartialName('System.DirectoryServices.AccountManagement') - $pc = New-Object -TypeName System.DirectoryServices.AccountManagement.PrincipalContext 'Machine', $env:COMPUTERNAME - # FIXME: ValidateCredentials fails if PasswordExpired == 1 + $host_name = [System.Net.Dns]::GetHostName() + $pc = New-Object -TypeName System.DirectoryServices.AccountManagement.PrincipalContext 'Machine', $host_name + # ValidateCredentials fails if PasswordExpired == 1 If (!$pc.ValidateCredentials($username, $password)) { $user_obj.SetPassword($password) $result.changed = $true @@ -195,6 +196,9 @@ If ($state -eq 'present') { $user_obj.IsAccountLocked = $account_locked $result.changed = $true } + If ($result.changed) { + $user_obj.SetInfo() + } If ($groups.GetType) { [string[]]$current_groups = $user_obj.Groups() | ForEach { $_.GetType().InvokeMember("Name", "GetProperty", $null, $_, $null) } If (($groups_action -eq "remove") -or ($groups_action -eq "replace")) { @@ -226,9 +230,6 @@ If ($state -eq 'present') { } } } - If ($result.changed) { - $user_obj.SetInfo() - } } catch { Fail-Json $result $_.Exception.Message From 1b0afb137c78383c47b3aaa31f4b849ddcb8783f Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 24 Nov 2014 20:51:27 -0800 Subject: [PATCH 100/250] More robust quoting of database identifiers Note: These aren't database values, those are already using the appropriate Python DB API method for quoting. 
--- database/postgresql/postgresql_db.py | 40 +++++++++++++++++----------- 1 file changed, 25 insertions(+), 15 deletions(-) diff --git a/database/postgresql/postgresql_db.py b/database/postgresql/postgresql_db.py index 605be621601..f965eac211a 100644 --- a/database/postgresql/postgresql_db.py +++ b/database/postgresql/postgresql_db.py @@ -124,7 +124,9 @@ class NotSupportedError(Exception): # def set_owner(cursor, db, owner): - query = "ALTER DATABASE \"%s\" OWNER TO \"%s\"" % (db, owner) + query = "ALTER DATABASE %s OWNER TO %s" % ( + pg_quote_identifier(db, 'database'), + pg_quote_identifier(owner, 'role')) cursor.execute(query) return True @@ -141,7 +143,7 @@ def get_db_info(cursor, db): FROM pg_database JOIN pg_roles ON pg_roles.oid = pg_database.datdba WHERE datname = %(db)s """ - cursor.execute(query, {'db':db}) + cursor.execute(query, {'db': db}) return cursor.fetchone() def db_exists(cursor, db): @@ -151,28 +153,28 @@ def db_exists(cursor, db): def db_delete(cursor, db): if db_exists(cursor, db): - query = "DROP DATABASE \"%s\"" % db + query = "DROP DATABASE %s" % pg_quote_identifier(db, 'database') cursor.execute(query) return True else: return False def db_create(cursor, db, owner, template, encoding, lc_collate, lc_ctype): + params = dict(enc=encoding, collate=lc_collate, ctype=lc_ctype) if not db_exists(cursor, db): + query_fragments = ['CREATE DATABASE %s' % pg_quote_identifier(db, 'database')] if owner: - owner = " OWNER \"%s\"" % owner + query_fragments.append('OWNER %s' % pg_quote_identifier(owner, 'role')) if template: - template = " TEMPLATE \"%s\"" % template + query_fragments.append('TEMPLATE %s' % pg_quote_identifier(template, 'database')) if encoding: - encoding = " ENCODING '%s'" % encoding + query_fragments.append('ENCODING %(enc)s') if lc_collate: - lc_collate = " LC_COLLATE '%s'" % lc_collate + query_fragments.append('LC_COLLATE %(collate)s') if lc_ctype: - lc_ctype = " LC_CTYPE '%s'" % lc_ctype - query = 'CREATE DATABASE "%s"%s%s%s%s%s' % 
(db, owner, - template, encoding, - lc_collate, lc_ctype) - cursor.execute(query) + query_fragments.append('LC_CTYPE %(ctype)s') + query = ' '.join(query_fragments) + cursor.execute(query, params) return True else: db_info = get_db_info(cursor, db) @@ -284,11 +286,17 @@ def main(): module.exit_json(changed=changed,db=db) if state == "absent": - changed = db_delete(cursor, db) + try: + changed = db_delete(cursor, db) + except SQLParseError, e: + module.fail_json(msg=str(e)) elif state == "present": - changed = db_create(cursor, db, owner, template, encoding, + try: + changed = db_create(cursor, db, owner, template, encoding, lc_collate, lc_ctype) + except SQLParseError, e: + module.fail_json(msg=str(e)) except NotSupportedError, e: module.fail_json(msg=str(e)) except Exception, e: @@ -298,4 +306,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() +from ansible.module_utils.database import * +if __name__ == '__main__': + main() From 51910a1a33c2144cea7e8b792571f798b6d546aa Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 24 Nov 2014 22:30:10 -0800 Subject: [PATCH 101/250] Audit escaping of identifiers in the postgresql_user module --- database/postgresql/postgresql_user.py | 139 +++++++++++++++---------- 1 file changed, 86 insertions(+), 53 deletions(-) diff --git a/database/postgresql/postgresql_user.py b/database/postgresql/postgresql_user.py index 8af8c45d0c5..ecc1ffb607b 100644 --- a/database/postgresql/postgresql_user.py +++ b/database/postgresql/postgresql_user.py @@ -145,6 +145,7 @@ INSERT,UPDATE/table:SELECT/anothertable:ALL ''' import re +import itertools try: import psycopg2 @@ -153,6 +154,19 @@ except ImportError: else: postgresqldb_found = True +_flags = ('SUPERUSER', 'CREATEROLE', 'CREATEUSER', 'CREATEDB', 'INHERIT', 'LOGIN', 'REPLICATION') +VALID_FLAGS = frozenset(itertools.chain(_flags, ('NO%s' %f for f in _flags))) + +VALID_PRIVS = dict(table=frozenset(('SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE', 
'REFERENCES', 'TRIGGER', 'ALL')), + database=frozenset(('CREATE', 'CONNECT', 'TEMPORARY', 'TEMP', 'ALL')), + ) + +class InvalidFlagsError(Exception): + pass + +class InvalidPrivsError(Exception): + pass + # =========================================== # PostgreSQL module specific support methods. # @@ -167,17 +181,18 @@ def user_exists(cursor, user): return cursor.rowcount > 0 -def user_add(cursor, user, password, role_attr_flags, encrypted, expires): +def user_add(cursor, user, password, role_attr_flags, encrypted, expires): """Create a new database user (role).""" - query_password_data = dict() - query = 'CREATE USER "%(user)s"' % { "user": user} + # Note: role_attr_flags escaped by parse_role_attrs and encrypted is a literal + query_password_data = dict(password=password, expires=expires) + query = ['CREATE USER %(user)s' % { "user": pg_quote_identifier(user, 'role')}] if password is not None: - query = query + " WITH %(crypt)s" % { "crypt": encrypted } - query = query + " PASSWORD %(password)s" - query_password_data.update(password=password) + query.append("WITH %(crypt)s" % { "crypt": encrypted }) + query.append("PASSWORD %(password)s") if expires is not None: - query = query + " VALID UNTIL '%(expires)s'" % { "expires": expires } - query = query + " " + role_attr_flags + query.append("VALID UNTIL %(expires)s") + query = query.append(role_attr_flags) + query = ' '.join(query) cursor.execute(query, query_password_data) return True @@ -185,6 +200,7 @@ def user_alter(cursor, module, user, password, role_attr_flags, encrypted, expir """Change user password and/or attributes. Return True if changed, False otherwise.""" changed = False + # Note: role_attr_flags escaped by parse_role_attrs and encrypted is a literal if user == 'PUBLIC': if password is not None: module.fail_json(msg="cannot change the password for PUBLIC user") @@ -196,22 +212,21 @@ def user_alter(cursor, module, user, password, role_attr_flags, encrypted, expir # Handle passwords. 
if password is not None or role_attr_flags is not None: # Select password and all flag-like columns in order to verify changes. - query_password_data = dict() + query_password_data = dict(password=password, expires=expires) select = "SELECT * FROM pg_authid where rolname=%(user)s" cursor.execute(select, {"user": user}) # Grab current role attributes. current_role_attrs = cursor.fetchone() - alter = 'ALTER USER "%(user)s"' % {"user": user} + alter = ['ALTER USER "%(user)s"' % {"user": pg_quote_identifier(user, 'role')}] if password is not None: - query_password_data.update(password=password) - alter = alter + " WITH %(crypt)s" % {"crypt": encrypted} - alter = alter + " PASSWORD %(password)s" - alter = alter + " %(flags)s" % {'flags': role_attr_flags} + alter.append("WITH %(crypt)s" % {"crypt": encrypted}) + alter.append("PASSWORD %(password)s") + alter.append(role_attr_flags) elif role_attr_flags: - alter = alter + ' WITH ' + role_attr_flags + alter.append('WITH %s' % role_attr_flags) if expires is not None: - alter = alter + " VALID UNTIL '%(expires)s'" % { "exipres": expires } + alter.append("VALID UNTIL %(expires)s") try: cursor.execute(alter, query_password_data) @@ -240,7 +255,7 @@ def user_delete(cursor, user): """Try to remove a user. Returns True if successful otherwise False""" cursor.execute("SAVEPOINT ansible_pgsql_user_delete") try: - cursor.execute("DROP USER \"%s\"" % user) + cursor.execute("DROP USER %s" % pg_quote_identifier(user, 'role')) except: cursor.execute("ROLLBACK TO SAVEPOINT ansible_pgsql_user_delete") cursor.execute("RELEASE SAVEPOINT ansible_pgsql_user_delete") @@ -264,36 +279,20 @@ def get_table_privileges(cursor, user, table): cursor.execute(query, (user, table, schema)) return set([x[0] for x in cursor.fetchall()]) - -def quote_pg_identifier(identifier): - """ - quote postgresql identifiers involving zero or more namespaces - """ - - if '"' in identifier: - # the user has supplied their own quoting. 
we have to hope they're - # doing it right. Maybe they have an unfortunately named table - # containing a period in the name, such as: "public"."users.2013" - return identifier - - tokens = identifier.strip().split(".") - quoted_tokens = [] - for token in tokens: - quoted_tokens.append('"%s"' % (token, )) - return ".".join(quoted_tokens) - def grant_table_privilege(cursor, user, table, priv): + # Note: priv escaped by parse_privs prev_priv = get_table_privileges(cursor, user, table) query = 'GRANT %s ON TABLE %s TO %s' % ( - priv, quote_pg_identifier(table), quote_pg_identifier(user), ) + priv, pg_quote_identifier(table, 'table'), pg_quote_identifier(user, 'role') ) cursor.execute(query) curr_priv = get_table_privileges(cursor, user, table) return len(curr_priv) > len(prev_priv) def revoke_table_privilege(cursor, user, table, priv): + # Note: priv escaped by parse_privs prev_priv = get_table_privileges(cursor, user, table) query = 'REVOKE %s ON TABLE %s FROM %s' % ( - priv, quote_pg_identifier(table), quote_pg_identifier(user), ) + priv, pg_quote_identifier(table, 'table'), pg_quote_identifier(user, 'role') ) cursor.execute(query) curr_priv = get_table_privileges(cursor, user, table) return len(curr_priv) < len(prev_priv) @@ -324,21 +323,29 @@ def has_database_privilege(cursor, user, db, priv): return cursor.fetchone()[0] def grant_database_privilege(cursor, user, db, priv): + # Note: priv escaped by parse_privs prev_priv = get_database_privileges(cursor, user, db) if user == "PUBLIC": - query = 'GRANT %s ON DATABASE \"%s\" TO PUBLIC' % (priv, db) + query = 'GRANT %s ON DATABASE %s TO PUBLIC' % ( + priv, pg_quote_identifier(db, 'database')) else: - query = 'GRANT %s ON DATABASE \"%s\" TO \"%s\"' % (priv, db, user) + query = 'GRANT %s ON DATABASE %s TO %s' % ( + priv, pg_quote_identifier(db, 'database'), + pg_quote_identifier(user, 'role')) cursor.execute(query) curr_priv = get_database_privileges(cursor, user, db) return len(curr_priv) > len(prev_priv) def 
revoke_database_privilege(cursor, user, db, priv): + # Note: priv escaped by parse_privs prev_priv = get_database_privileges(cursor, user, db) if user == "PUBLIC": - query = 'REVOKE %s ON DATABASE \"%s\" FROM PUBLIC' % (priv, db) + query = 'REVOKE %s ON DATABASE %s FROM PUBLIC' % ( + priv, pg_quote_identifier(db, 'database')) else: - query = 'REVOKE %s ON DATABASE \"%s\" FROM \"%s\"' % (priv, db, user) + query = 'REVOKE %s ON DATABASE %s FROM %s' % ( + priv, pg_quote_identifier(db, 'database'), + pg_quote_identifier(user, 'role')) cursor.execute(query) curr_priv = get_database_privileges(cursor, user, db) return len(curr_priv) < len(prev_priv) @@ -387,11 +394,18 @@ def parse_role_attrs(role_attr_flags): Where: attributes := CREATEDB,CREATEROLE,NOSUPERUSER,... + [ "[NO]SUPERUSER","[NO]CREATEROLE", "[NO]CREATEUSER", "[NO]CREATEDB", + "[NO]INHERIT", "[NO]LOGIN", "[NO]REPLICATION" ] + """ - if ',' not in role_attr_flags: - return role_attr_flags - flag_set = role_attr_flags.split(",") - o_flags = " ".join(flag_set) + if ',' in role_attr_flags: + flag_set = frozenset(role_attr_flags.split(",")) + else: + flag_set = frozenset(role_attr_flags) + if not flag_set.is_subset(VALID_FLAGS): + raise InvalidFlagsError('Invalid role_attr_flags specified: %s' % + ' '.join(flag_set.difference(VALID_FLAGS))) + o_flags = ' '.join(flag_set) return o_flags def parse_privs(privs, db): @@ -417,12 +431,15 @@ def parse_privs(privs, db): if ':' not in token: type_ = 'database' name = db - priv_set = set(x.strip() for x in token.split(',')) + priv_set = frozenset(x.strip() for x in token.split(',')) else: type_ = 'table' name, privileges = token.split(':', 1) - priv_set = set(x.strip() for x in privileges.split(',')) + priv_set = frozenset(x.strip() for x in privileges.split(',')) + if not priv_set.issubset(VALID_PRIVS[type_]): + raise InvalidPrivsError('Invalid privs specified for %s: %s' % + (type_, ' '.join(priv_set.difference(VALID_PRIVS[type_])))) o_privs[type_][name] = priv_set return 
o_privs @@ -460,7 +477,10 @@ def main(): module.fail_json(msg="privileges require a database to be specified") privs = parse_privs(module.params["priv"], db) port = module.params["port"] - role_attr_flags = parse_role_attrs(module.params["role_attr_flags"]) + try: + role_attr_flags = parse_role_attrs(module.params["role_attr_flags"]) + except InvalidFlagsError, e: + module.fail_json(msg=str(e)) if module.params["encrypted"]: encrypted = "ENCRYPTED" else: @@ -494,18 +514,30 @@ def main(): if state == "present": if user_exists(cursor, user): - changed = user_alter(cursor, module, user, password, role_attr_flags, encrypted, expires) + try: + changed = user_alter(cursor, module, user, password, role_attr_flags, encrypted, expires) + except SQLParseError, e: + module.fail_json(msg=str(e)) else: - changed = user_add(cursor, user, password, role_attr_flags, encrypted, expires) - changed = grant_privileges(cursor, user, privs) or changed + try: + changed = user_add(cursor, user, password, role_attr_flags, encrypted, expires) + except SQLParseError, e: + module.fail_json(msg=str(e)) + try: + changed = grant_privileges(cursor, user, privs) or changed + except SQLParseError, e: + module.fail_json(msg=str(e)) else: if user_exists(cursor, user): if module.check_mode: changed = True kw['user_removed'] = True else: - changed = revoke_privileges(cursor, user, privs) - user_removed = user_delete(cursor, user) + try: + changed = revoke_privileges(cursor, user, privs) + user_removed = user_delete(cursor, user) + except SQLParseError, e: + module.fail_json(msg=str(e)) changed = changed or user_removed if fail_on_user and not user_removed: msg = "unable to remove user" @@ -523,4 +555,5 @@ def main(): # import module snippets from ansible.module_utils.basic import * +from ansible.module_utils.database import * main() From fbc4ed7a886109b8ba761609f80e6813d85d3e72 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 24 Nov 2014 22:56:51 -0800 Subject: [PATCH 102/250] Make sure we 
quote or confirm good all user provided identifiers --- database/postgresql/postgresql_privs.py | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/database/postgresql/postgresql_privs.py b/database/postgresql/postgresql_privs.py index de5fa94fa48..8fa8696f103 100644 --- a/database/postgresql/postgresql_privs.py +++ b/database/postgresql/postgresql_privs.py @@ -230,6 +230,9 @@ except ImportError: psycopg2 = None +VALID_PRIVS = frozenset(('SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE', + 'REFERENCES', 'TRIGGER', 'CREATE', 'CONNECT', + 'TEMPORARY', 'TEMP', 'EXECUTE', 'USAGE', 'ALL')) class Error(Exception): pass @@ -454,19 +457,21 @@ class Connection(object): else: obj_ids = ['"%s"' % o for o in objs] - # set_what: SQL-fragment specifying what to set for the target roless: - # Either group membership or privileges on objects of a certain type. + # set_what: SQL-fragment specifying what to set for the target roles: + # Either group membership or privileges on objects of a certain type if obj_type == 'group': - set_what = ','.join(obj_ids) + set_what = ','.join(pg_quote_identifiers(i, 'role') for i in obj_ids) else: - set_what = '%s ON %s %s' % (','.join(privs), obj_type, - ','.join(obj_ids)) + # Note: obj_type has been checked against a set of string literals + # and privs was escaped when it was parsed + set_what = '%s ON %s %s' % (','.join(privs), obj_type, + ','.join(pg_quote_identifiers(i, 'table') for i in obj_ids)) # for_whom: SQL-fragment specifying for whom to set the above if roles == 'PUBLIC': for_whom = 'PUBLIC' else: - for_whom = ','.join(['"%s"' % r for r in roles]) + for_whom = ','.join(pg_quote_identifiers(r, 'role') for r in roles) status_before = get_status(objs) if state == 'present': @@ -558,7 +563,9 @@ def main(): try: # privs if p.privs: - privs = p.privs.split(',') + privs = frozenset(p.privs.split(',')) + if not privs.issubset(VALID_PRIVS): + module.fail_json(msg='Invalid privileges specified: %s' % 
privs.difference(VALID_PRIVS)) else: privs = None @@ -610,4 +617,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() +from ansible.module_utils.database import * +if __name__ == '__main__': + main() From c84ae5429444d42da2e6af44332759d0a92c0f52 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 25 Nov 2014 00:44:18 -0800 Subject: [PATCH 103/250] Normalize privs and flags to uppercase so comparisons against allowed names will work --- database/postgresql/postgresql_privs.py | 2 +- database/postgresql/postgresql_user.py | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/database/postgresql/postgresql_privs.py b/database/postgresql/postgresql_privs.py index 8fa8696f103..55821756246 100644 --- a/database/postgresql/postgresql_privs.py +++ b/database/postgresql/postgresql_privs.py @@ -563,7 +563,7 @@ def main(): try: # privs if p.privs: - privs = frozenset(p.privs.split(',')) + privs = frozenset(pr.upper() for pr in p.privs.split(',')) if not privs.issubset(VALID_PRIVS): module.fail_json(msg='Invalid privileges specified: %s' % privs.difference(VALID_PRIVS)) else: diff --git a/database/postgresql/postgresql_user.py b/database/postgresql/postgresql_user.py index ecc1ffb607b..0823bd7cd90 100644 --- a/database/postgresql/postgresql_user.py +++ b/database/postgresql/postgresql_user.py @@ -155,7 +155,7 @@ else: postgresqldb_found = True _flags = ('SUPERUSER', 'CREATEROLE', 'CREATEUSER', 'CREATEDB', 'INHERIT', 'LOGIN', 'REPLICATION') -VALID_FLAGS = frozenset(itertools.chain(_flags, ('NO%s' %f for f in _flags))) +VALID_FLAGS = frozenset(itertools.chain(_flags, ('NO%s' % f for f in _flags))) VALID_PRIVS = dict(table=frozenset(('SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE', 'REFERENCES', 'TRIGGER', 'ALL')), database=frozenset(('CREATE', 'CONNECT', 'TEMPORARY', 'TEMP', 'ALL')), @@ -399,9 +399,9 @@ def parse_role_attrs(role_attr_flags): """ if ',' in role_attr_flags: - flag_set = 
frozenset(role_attr_flags.split(",")) + flag_set = frozenset(r.upper() for r in role_attr_flags.split(",")) else: - flag_set = frozenset(role_attr_flags) + flag_set = frozenset(role_attr_flags.upper()) if not flag_set.is_subset(VALID_FLAGS): raise InvalidFlagsError('Invalid role_attr_flags specified: %s' % ' '.join(flag_set.difference(VALID_FLAGS))) @@ -431,11 +431,11 @@ def parse_privs(privs, db): if ':' not in token: type_ = 'database' name = db - priv_set = frozenset(x.strip() for x in token.split(',')) + priv_set = frozenset(x.strip().upper() for x in token.split(',')) else: type_ = 'table' name, privileges = token.split(':', 1) - priv_set = frozenset(x.strip() for x in privileges.split(',')) + priv_set = frozenset(x.strip().upper() for x in privileges.split(',')) if not priv_set.issubset(VALID_PRIVS[type_]): raise InvalidPrivsError('Invalid privs specified for %s: %s' % From 06ac459fc5d855dd3725b6a4e17ce57bed1c66f7 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 25 Nov 2014 01:42:33 -0800 Subject: [PATCH 104/250] Correct new function name --- database/postgresql/postgresql_privs.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/database/postgresql/postgresql_privs.py b/database/postgresql/postgresql_privs.py index 55821756246..febdc9edf17 100644 --- a/database/postgresql/postgresql_privs.py +++ b/database/postgresql/postgresql_privs.py @@ -460,18 +460,18 @@ class Connection(object): # set_what: SQL-fragment specifying what to set for the target roles: # Either group membership or privileges on objects of a certain type if obj_type == 'group': - set_what = ','.join(pg_quote_identifiers(i, 'role') for i in obj_ids) + set_what = ','.join(pg_quote_identifier(i, 'role') for i in obj_ids) else: # Note: obj_type has been checked against a set of string literals # and privs was escaped when it was parsed set_what = '%s ON %s %s' % (','.join(privs), obj_type, - ','.join(pg_quote_identifiers(i, 'table') for i in obj_ids)) + 
','.join(pg_quote_identifier(i, 'table') for i in obj_ids)) # for_whom: SQL-fragment specifying for whom to set the above if roles == 'PUBLIC': for_whom = 'PUBLIC' else: - for_whom = ','.join(pg_quote_identifiers(r, 'role') for r in roles) + for_whom = ','.join(pg_quote_identifier(r, 'role') for r in roles) status_before = get_status(objs) if state == 'present': From 10ebcccedb542c7e1c499e77a1f53da98d373bc3 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 25 Nov 2014 01:46:09 -0800 Subject: [PATCH 105/250] Escape mysql identifiers --- database/mysql/mysql_db.py | 16 +++++---- database/mysql/mysql_user.py | 58 +++++++++++++++++++++++-------- database/mysql/mysql_variables.py | 13 +++++-- 3 files changed, 64 insertions(+), 23 deletions(-) diff --git a/database/mysql/mysql_db.py b/database/mysql/mysql_db.py index 38dee608ba5..3983c66639a 100644 --- a/database/mysql/mysql_db.py +++ b/database/mysql/mysql_db.py @@ -118,7 +118,7 @@ def db_exists(cursor, db): return bool(res) def db_delete(cursor, db): - query = "DROP DATABASE `%s`" % db + query = "DROP DATABASE %s" % mysql_quote_identifier(db, 'database') cursor.execute(query) return True @@ -190,12 +190,14 @@ def db_import(module, host, user, password, db_name, target, port, socket=None): return rc, stdout, stderr def db_create(cursor, db, encoding, collation): + query_params = dict(enc=encoding, collate=collation) + query = ['CREATE DATABASE %s' % mysql_quote_identifier(db, 'database')] if encoding: - encoding = " CHARACTER SET %s" % encoding + query.append("CHARACTER SET %(enc)s") if collation: - collation = " COLLATE %s" % collation - query = "CREATE DATABASE `%s`%s%s" % (db, encoding, collation) - res = cursor.execute(query) + query.append("COLLATE %(collate)s") + query = ' '.join(query) + res = cursor.execute(query, query_params) return True def strip_quotes(s): @@ -360,4 +362,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() +from ansible.module_utils.database 
import * +if __name__ == '__main__': + main() diff --git a/database/mysql/mysql_user.py b/database/mysql/mysql_user.py index aaec05f99f5..3fac5bc2759 100644 --- a/database/mysql/mysql_user.py +++ b/database/mysql/mysql_user.py @@ -151,6 +151,19 @@ except ImportError: else: mysqldb_found = True +VALID_PRIVS = frozenset(('CREATE', 'DROP', 'GRANT OPTION', 'LOCK TABLES', + 'REFERENCES', 'EVENT', 'ALTER', 'DELETE', 'INDEX', + 'INSERT', 'SELECT', 'UPDATE', + 'CREATE TEMPORARY TABLES', 'TRIGGER', 'CREATE VIEW', + 'SHOW VIEW', 'ALTER ROUTINE', 'CREATE ROUTINE', + 'EXECUTE', 'FILE', 'CREATE USER', 'PROCESS', 'RELOAD', + 'REPLICATION CLIENT', 'REPLICATION SLAVE', + 'SHOW DATABASES', 'SHUTDOWN', 'SUPER', 'ALL', + 'ALL PRIVILEGES', 'USAGE',)) + +class InvalidPrivsError(Exception): + pass + # =========================================== # MySQL module specific support methods. # @@ -217,7 +230,7 @@ def user_mod(cursor, user, host, password, new_priv, append_privs): return changed def user_delete(cursor, user, host): - cursor.execute("DROP USER %s@%s", (user,host)) + cursor.execute("DROP USER %s@%s", (user, host)) return True def privileges_get(cursor, user,host): @@ -231,7 +244,7 @@ def privileges_get(cursor, user,host): The dictionary format is the same as that returned by privileges_unpack() below. 
""" output = {} - cursor.execute("SHOW GRANTS FOR %s@%s", (user,host)) + cursor.execute("SHOW GRANTS FOR %s@%s", (user, host)) grants = cursor.fetchall() def pick(x): @@ -274,6 +287,9 @@ def privileges_unpack(priv): pieces[0] = '.'.join(pieces[0]) output[pieces[0]] = pieces[1].upper().split(',') + new_privs = frozenset(output[pieces[0]]) + if not new_privs.issubset(VALID_PRIVS): + raise InvalidPrivsError('Invalid privileges specified: %s' % new_privs.difference(VALID_PRIVS)) if '*.*' not in output: output['*.*'] = ['USAGE'] @@ -282,18 +298,24 @@ def privileges_unpack(priv): def privileges_revoke(cursor, user,host,db_table,grant_option): if grant_option: - query = "REVOKE GRANT OPTION ON %s FROM '%s'@'%s'" % (db_table,user,host) - cursor.execute(query) - query = "REVOKE ALL PRIVILEGES ON %s FROM '%s'@'%s'" % (db_table,user,host) - cursor.execute(query) + query = ["REVOKE GRANT OPTION ON %s" % mysql_quote_identifier(db_table, 'table')] + query.append("FROM %s@%s") + query = ' '.join(query) + cursor.execute(query, (user, host)) + query = ["REVOKE ALL PRIVILEGES ON %s" % mysql_quote_identifier(db_table, 'table')] + query.append("FROM %s@%s") + query = ' '.join(query) + cursor.execute(query, (user, host)) def privileges_grant(cursor, user,host,db_table,priv): priv_string = ",".join(filter(lambda x: x != 'GRANT', priv)) - query = "GRANT %s ON %s TO '%s'@'%s'" % (priv_string,db_table,user,host) + query = ["GRANT %s ON %s" % (priv_string, mysql_quote_identifier(db_table, 'table'))] + query.append("TO %s@%s") if 'GRANT' in priv: - query = query + " WITH GRANT OPTION" - cursor.execute(query) + query.append("WITH GRANT OPTION") + query = ' '.join(query) + cursor.execute(query, (user, host)) def strip_quotes(s): @@ -425,8 +447,8 @@ def main(): if priv is not None: try: priv = privileges_unpack(priv) - except: - module.fail_json(msg="invalid privileges string") + except Exception, e: + module.fail_json(msg="invalid privileges string: %s" % str(e)) # Either the caller passes 
both a username and password with which to connect to # mysql, or they pass neither and allow this module to read the credentials from @@ -459,11 +481,17 @@ def main(): if state == "present": if user_exists(cursor, user, host): - changed = user_mod(cursor, user, host, password, priv, append_privs) + try: + changed = user_mod(cursor, user, host, password, priv, append_privs) + except SQLParseError, e: + module.fail_json(msg=str(e)) else: if password is None: module.fail_json(msg="password parameter required when adding a user") - changed = user_add(cursor, user, host, password, priv) + try: + changed = user_add(cursor, user, host, password, priv) + except SQLParseError, e: + module.fail_json(msg=str(e)) elif state == "absent": if user_exists(cursor, user, host): changed = user_delete(cursor, user, host) @@ -473,4 +501,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() +from ansible.module_utils.database import * +if __name__ == '__main__': + main() diff --git a/database/mysql/mysql_variables.py b/database/mysql/mysql_variables.py index 7353fdd485d..199c5eb6eca 100644 --- a/database/mysql/mysql_variables.py +++ b/database/mysql/mysql_variables.py @@ -103,7 +103,7 @@ def typedvalue(value): def getvariable(cursor, mysqlvar): - cursor.execute("SHOW VARIABLES LIKE '" + mysqlvar + "'") + cursor.execute("SHOW VARIABLES LIKE %s", (mysqlvar,)) mysqlvar_val = cursor.fetchall() return mysqlvar_val @@ -116,8 +116,11 @@ def setvariable(cursor, mysqlvar, value): should be passed as numeric literals. 
""" + query = ["SET GLOBAL %s" % mysql_quote_identifier(mysqlvar, 'vars') ] + query.append(" = %s") + query = ' '.join(query) try: - cursor.execute("SET GLOBAL " + mysqlvar + " = %s", (value,)) + cursor.execute(query, (value,)) cursor.fetchall() result = True except Exception, e: @@ -242,7 +245,10 @@ def main(): value_actual = typedvalue(mysqlvar_val[0][1]) if value_wanted == value_actual: module.exit_json(msg="Variable already set to requested value", changed=False) - result = setvariable(cursor, mysqlvar, value_wanted) + try: + result = setvariable(cursor, mysqlvar, value_wanted) + except SQLParseError, e: + result = str(e) if result is True: module.exit_json(msg="Variable change succeeded prev_value=%s" % value_actual, changed=True) else: @@ -250,4 +256,5 @@ def main(): # import module snippets from ansible.module_utils.basic import * +from ansible.module_utils.database import * main() From 4a3d7473fdc8d7c090c93d04c7bc3defc83147d3 Mon Sep 17 00:00:00 2001 From: Devin Christensen Date: Tue, 25 Nov 2014 10:44:04 -0700 Subject: [PATCH 106/250] Fix syntax error --- database/postgresql/postgresql_user.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/database/postgresql/postgresql_user.py b/database/postgresql/postgresql_user.py index 0823bd7cd90..7839669cb1a 100644 --- a/database/postgresql/postgresql_user.py +++ b/database/postgresql/postgresql_user.py @@ -402,7 +402,7 @@ def parse_role_attrs(role_attr_flags): flag_set = frozenset(r.upper() for r in role_attr_flags.split(",")) else: flag_set = frozenset(role_attr_flags.upper()) - if not flag_set.is_subset(VALID_FLAGS): + if not flag_set.issubset(VALID_FLAGS): raise InvalidFlagsError('Invalid role_attr_flags specified: %s' % ' '.join(flag_set.difference(VALID_FLAGS))) o_flags = ' '.join(flag_set) From c77ab67274600aff72b0d10eebcd5b7b8c32b0c5 Mon Sep 17 00:00:00 2001 From: Devin Christensen Date: Tue, 25 Nov 2014 11:46:41 -0700 Subject: [PATCH 107/250] Fix user_add in postgresql_user --- 
database/postgresql/postgresql_user.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/database/postgresql/postgresql_user.py b/database/postgresql/postgresql_user.py index 7839669cb1a..d7cca2fcd68 100644 --- a/database/postgresql/postgresql_user.py +++ b/database/postgresql/postgresql_user.py @@ -191,7 +191,7 @@ def user_add(cursor, user, password, role_attr_flags, encrypted, expires): query.append("PASSWORD %(password)s") if expires is not None: query.append("VALID UNTIL %(expires)s") - query = query.append(role_attr_flags) + query.append(role_attr_flags) query = ' '.join(query) cursor.execute(query, query_password_data) return True From 06f1c1a97ec40aa78b0d413819ec6514f5c3ff88 Mon Sep 17 00:00:00 2001 From: Devin Christensen Date: Tue, 25 Nov 2014 12:04:47 -0700 Subject: [PATCH 108/250] Fix user_alter in postgresql_user --- database/postgresql/postgresql_user.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/database/postgresql/postgresql_user.py b/database/postgresql/postgresql_user.py index d7cca2fcd68..421831e8165 100644 --- a/database/postgresql/postgresql_user.py +++ b/database/postgresql/postgresql_user.py @@ -218,7 +218,7 @@ def user_alter(cursor, module, user, password, role_attr_flags, encrypted, expir # Grab current role attributes. 
current_role_attrs = cursor.fetchone() - alter = ['ALTER USER "%(user)s"' % {"user": pg_quote_identifier(user, 'role')}] + alter = ['ALTER USER %(user)s' % {"user": pg_quote_identifier(user, 'role')}] if password is not None: alter.append("WITH %(crypt)s" % {"crypt": encrypted}) alter.append("PASSWORD %(password)s") @@ -229,7 +229,7 @@ def user_alter(cursor, module, user, password, role_attr_flags, encrypted, expir alter.append("VALID UNTIL %(expires)s") try: - cursor.execute(alter, query_password_data) + cursor.execute(' '.join(alter), query_password_data) except psycopg2.InternalError, e: if e.pgcode == '25006': # Handle errors due to read-only transactions indicated by pgcode 25006 From 41559311d8e330d369c764f42c0e0396f626f177 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 25 Nov 2014 16:03:52 -0800 Subject: [PATCH 109/250] Fix cornercase tracebaxk when detecting whether submodules changed --- source_control/git.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/source_control/git.py b/source_control/git.py index 17378fc97b6..06768744b13 100644 --- a/source_control/git.py +++ b/source_control/git.py @@ -688,6 +688,7 @@ def main(): switch_version(git_path, module, dest, remote, version, recursive, track_submodules) # Deal with submodules + submodules_updated = False if recursive and not bare: submodules_updated = submodules_fetch(git_path, module, remote, track_submodules, dest) @@ -707,7 +708,7 @@ def main(): changed = False if before != after or local_mods: changed = True - elif recursive and submodules_updated: + elif submodules_updated: changed =True # cleanup the wrapper script From 7dd2859f9b13e9df3baa9f2ef947e3630a6e7dbc Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 26 Nov 2014 08:26:53 -0800 Subject: [PATCH 110/250] Add a bare grant to the list of allowed privileges --- database/mysql/mysql_user.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/database/mysql/mysql_user.py 
b/database/mysql/mysql_user.py index 3fac5bc2759..9bb1d7be4c7 100644 --- a/database/mysql/mysql_user.py +++ b/database/mysql/mysql_user.py @@ -151,13 +151,13 @@ except ImportError: else: mysqldb_found = True -VALID_PRIVS = frozenset(('CREATE', 'DROP', 'GRANT OPTION', 'LOCK TABLES', - 'REFERENCES', 'EVENT', 'ALTER', 'DELETE', 'INDEX', - 'INSERT', 'SELECT', 'UPDATE', +VALID_PRIVS = frozenset(('CREATE', 'DROP', 'GRANT', 'GRANT OPTION', + 'LOCK TABLES', 'REFERENCES', 'EVENT', 'ALTER', + 'DELETE', 'INDEX', 'INSERT', 'SELECT', 'UPDATE', 'CREATE TEMPORARY TABLES', 'TRIGGER', 'CREATE VIEW', 'SHOW VIEW', 'ALTER ROUTINE', 'CREATE ROUTINE', - 'EXECUTE', 'FILE', 'CREATE USER', 'PROCESS', 'RELOAD', - 'REPLICATION CLIENT', 'REPLICATION SLAVE', + 'EXECUTE', 'FILE', 'CREATE USER', 'PROCESS', + 'RELOAD', 'REPLICATION CLIENT', 'REPLICATION SLAVE', 'SHOW DATABASES', 'SHUTDOWN', 'SUPER', 'ALL', 'ALL PRIVILEGES', 'USAGE',)) From 2a794fa77693a58ed0c2585d3f70f686c38dbe93 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 26 Nov 2014 14:43:56 -0800 Subject: [PATCH 111/250] Fix for single role_attr --- database/postgresql/postgresql_user.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/database/postgresql/postgresql_user.py b/database/postgresql/postgresql_user.py index 421831e8165..00001bdcd7b 100644 --- a/database/postgresql/postgresql_user.py +++ b/database/postgresql/postgresql_user.py @@ -400,8 +400,10 @@ def parse_role_attrs(role_attr_flags): """ if ',' in role_attr_flags: flag_set = frozenset(r.upper() for r in role_attr_flags.split(",")) + elif role_attr_flags: + flag_set = frozenset((role_attr_flags.upper(),)) else: - flag_set = frozenset(role_attr_flags.upper()) + flag_set = frozenset() if not flag_set.issubset(VALID_FLAGS): raise InvalidFlagsError('Invalid role_attr_flags specified: %s' % ' '.join(flag_set.difference(VALID_FLAGS))) @@ -431,11 +433,11 @@ def parse_privs(privs, db): if ':' not in token: type_ = 'database' name = db - priv_set = 
frozenset(x.strip().upper() for x in token.split(',')) + priv_set = frozenset(x.strip().upper() for x in token.split(',') if x.strip()) else: type_ = 'table' name, privileges = token.split(':', 1) - priv_set = frozenset(x.strip().upper() for x in privileges.split(',')) + priv_set = frozenset(x.strip().upper() for x in privileges.split(',') if x.strip()) if not priv_set.issubset(VALID_PRIVS[type_]): raise InvalidPrivsError('Invalid privs specified for %s: %s' % From 1ef8c26c0e12b608d2fd3be4ff940d77b6732426 Mon Sep 17 00:00:00 2001 From: Fabrice Bernhard Date: Thu, 27 Nov 2014 12:36:34 +0100 Subject: [PATCH 112/250] Typo in comments Credit goes to @kbsali @pborreli : you did not see that one? :-) --- system/authorized_key.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/authorized_key.py b/system/authorized_key.py index f964113127e..d5792200b8d 100644 --- a/system/authorized_key.py +++ b/system/authorized_key.py @@ -333,7 +333,7 @@ def enforce_state(module, params): state = params.get("state", "present") key_options = params.get("key_options", None) - # extract indivial keys into an array, skipping blank lines and comments + # extract individual keys into an array, skipping blank lines and comments key = [s for s in key.splitlines() if s and not s.startswith('#')] From 32a00ec3a3305408d40b2213377187f15e296494 Mon Sep 17 00:00:00 2001 From: Pascal Borreli Date: Fri, 28 Nov 2014 16:36:21 +0100 Subject: [PATCH 113/250] Fixed typo --- system/sysctl.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/sysctl.py b/system/sysctl.py index 979051e1f8e..3cf29f9a32b 100644 --- a/system/sysctl.py +++ b/system/sysctl.py @@ -84,7 +84,7 @@ EXAMPLES = ''' # Set kernel.panic to 3 in /tmp/test_sysctl.conf - sysctl: name=kernel.panic value=3 sysctl_file=/tmp/test_sysctl.conf reload=no -# Set ip fowarding on in /proc and do not reload the sysctl file +# Set ip forwarding on in /proc and do not reload the sysctl file - sysctl: 
name="net.ipv4.ip_forward" value=1 sysctl_set=yes # Set ip forwarding on in /proc and in the sysctl file and reload if necessary From e715909831e27cbfed2ae86e886bf154cefd7b6c Mon Sep 17 00:00:00 2001 From: Lorin Hochstein Date: Fri, 28 Nov 2014 23:19:33 -0500 Subject: [PATCH 114/250] git doc: don't prepend ssh:// for ssh repo The github ssh example has ssh:// at the beginning of the url. However, this doesn't work. It does work if the ssh:// is removed. --- source_control/git.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source_control/git.py b/source_control/git.py index 06768744b13..d7e12508803 100644 --- a/source_control/git.py +++ b/source_control/git.py @@ -155,7 +155,7 @@ EXAMPLES = ''' version=release-0.22 # Example read-write git checkout from github -- git: repo=ssh://git@github.com/mylogin/hello.git dest=/home/mylogin/hello +- git: repo=git@github.com/mylogin/hello.git dest=/home/mylogin/hello # Example just ensuring the repo checkout exists - git: repo=git://foosball.example.org/path/to/repo.git dest=/srv/checkout update=no From a6e0d9d613a4c56c5734993f3c900daf265532b7 Mon Sep 17 00:00:00 2001 From: follower Date: Sat, 29 Nov 2014 18:05:22 +1300 Subject: [PATCH 115/250] Fix typo of "rules_egress" Or is "rules_egree" supposed to be a plural? The sentence is difficult to parse. Maybe the correct fix is to "Purge existing rules on security group that are not found in rules_egress"? 
--- cloud/amazon/ec2_group.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_group.py b/cloud/amazon/ec2_group.py index 250095a9dcb..822147468a2 100644 --- a/cloud/amazon/ec2_group.py +++ b/cloud/amazon/ec2_group.py @@ -55,7 +55,7 @@ options: purge_rules_egress: version_added: "1.8" description: - - Purge existing rules_egree on security group that are not found in rules_egress + - Purge existing rules_egress on security group that are not found in rules_egress required: false default: 'true' aliases: [] From 87dd3afc91a5a17f531a4a76d0c4c7c70c92d380 Mon Sep 17 00:00:00 2001 From: Eric Date: Sat, 29 Nov 2014 22:41:24 -0600 Subject: [PATCH 116/250] Fix lineinfile documentation typo --- files/lineinfile.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/lineinfile.py b/files/lineinfile.py index c72b7f9d9a9..ef73bde7b7f 100644 --- a/files/lineinfile.py +++ b/files/lineinfile.py @@ -148,7 +148,7 @@ EXAMPLES = r""" - lineinfile: dest=/opt/jboss-as/bin/standalone.conf regexp='^(.*)Xms(\d+)m(.*)$' line='\1Xms${xms}m\3' backrefs=yes -# Validate a the sudoers file before saving +# Validate the sudoers file before saving - lineinfile: dest=/etc/sudoers state=present regexp='^%ADMIN ALL\=' line='%ADMIN ALL=(ALL) NOPASSWD:ALL' validate='visudo -cf %s' """ From a375ce33d3c77d1b31168405b1459350e323374b Mon Sep 17 00:00:00 2001 From: Achilleas Pipinellis Date: Sat, 29 Nov 2014 15:49:36 +0200 Subject: [PATCH 117/250] Clarify supported init systems. --- system/service.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/system/service.py b/system/service.py index 61915709703..9653174f3cb 100644 --- a/system/service.py +++ b/system/service.py @@ -25,7 +25,8 @@ author: Michael DeHaan version_added: "0.1" short_description: Manage services. description: - - Controls services on remote hosts. + - Controls services on remote hosts. Supported init systems are: BSD init, + OpenRC, SysV, systemd, upstart. 
options: name: required: true From 1cab3076495f4e6699df5a784500942260220e66 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 1 Dec 2014 07:15:27 -0800 Subject: [PATCH 118/250] Fix module traceback instead of returning an error --- database/mysql/mysql_user.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/database/mysql/mysql_user.py b/database/mysql/mysql_user.py index 9bb1d7be4c7..e8461a05851 100644 --- a/database/mysql/mysql_user.py +++ b/database/mysql/mysql_user.py @@ -256,7 +256,7 @@ def privileges_get(cursor, user,host): for grant in grants: res = re.match("GRANT (.+) ON (.+) TO '.+'@'.+'( IDENTIFIED BY PASSWORD '.+')? ?(.*)", grant[0]) if res is None: - module.fail_json(msg="unable to parse the MySQL grant string") + raise InvalidPrivsError('unable to parse the MySQL grant string: %s' % grant[0]) privileges = res.group(1).split(", ") privileges = [ pick(x) for x in privileges] if "WITH GRANT OPTION" in res.group(4): @@ -485,6 +485,8 @@ def main(): changed = user_mod(cursor, user, host, password, priv, append_privs) except SQLParseError, e: module.fail_json(msg=str(e)) + except InvalidPrivsError, e: + module.mail_json(msg=str(e)) else: if password is None: module.fail_json(msg="password parameter required when adding a user") From 24137a3c6ce63eaa674e8468afaa90f4841d001f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ren=C3=A9=20Moser?= Date: Sun, 28 Sep 2014 20:30:39 +0200 Subject: [PATCH 119/250] yum: add update_cache option MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Yum does not always update to latest package version unless metadata cache has expired. By runing yum makecache, we ensure the metadata cache has been updated. 
Signed-off-by: René Moser --- packaging/os/yum.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/packaging/os/yum.py b/packaging/os/yum.py index c3158077d18..73fbb699e75 100644 --- a/packaging/os/yum.py +++ b/packaging/os/yum.py @@ -96,6 +96,16 @@ options: choices: ["yes", "no"] aliases: [] + update_cache: + description: + - Force updating the cache. Has an effect only if state is I(present) + or I(latest). + required: false + version_added: "1.9" + default: "no" + choices: ["yes", "no"] + aliases: [] + notes: [] # informational: requirements for nodes requirements: [ yum, rpm ] @@ -746,6 +756,10 @@ def ensure(module, state, pkgspec, conf_file, enablerepo, disablerepo, yum_basecmd.extend(r_cmd) if state in ['installed', 'present', 'latest']: + + if module.params.get('update_cache'): + module.run_command(yum_basecmd + ['makecache']) + my = yum_base(conf_file) try: for r in dis_repos: @@ -803,6 +817,7 @@ def main(): list=dict(), conf_file=dict(default=None), disable_gpg_check=dict(required=False, default="no", type='bool'), + update_cache=dict(required=False, default="no", type='bool'), # this should not be needed, but exists as a failsafe install_repoquery=dict(required=False, default="yes", type='bool'), ), From bf36697a556fed4c5ff4f3a5e3da11dc4fe415b4 Mon Sep 17 00:00:00 2001 From: Dan Date: Mon, 29 Sep 2014 15:58:00 -0700 Subject: [PATCH 120/250] Adds a login_unix_socket option to the postgresql_db module. 
--- database/postgresql/postgresql_db.py | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/database/postgresql/postgresql_db.py b/database/postgresql/postgresql_db.py index f965eac211a..60bbf247bf9 100644 --- a/database/postgresql/postgresql_db.py +++ b/database/postgresql/postgresql_db.py @@ -44,6 +44,11 @@ options: - Host running the database required: false default: localhost + login_unix_socket: + description + - Path to a Unix domain socket for local connections + required: false + default: null owner: description: - Name of the role to set as owner of the database @@ -178,7 +183,7 @@ def db_create(cursor, db, owner, template, encoding, lc_collate, lc_ctype): return True else: db_info = get_db_info(cursor, db) - if (encoding and + if (encoding and get_encoding_id(cursor, encoding) != db_info['encoding_id']): raise NotSupportedError( 'Changing database encoding is not supported. ' @@ -204,7 +209,7 @@ def db_matches(cursor, db, owner, template, encoding, lc_collate, lc_ctype): return False else: db_info = get_db_info(cursor, db) - if (encoding and + if (encoding and get_encoding_id(cursor, encoding) != db_info['encoding_id']): return False elif lc_collate and lc_collate != db_info['lc_collate']: @@ -226,6 +231,7 @@ def main(): login_user=dict(default="postgres"), login_password=dict(default=""), login_host=dict(default=""), + login_unix_socket=dict(default=""), port=dict(default="5432"), db=dict(required=True, aliases=['name']), owner=dict(default=""), @@ -251,7 +257,7 @@ def main(): state = module.params["state"] changed = False - # To use defaults values, keyword arguments must be absent, so + # To use defaults values, keyword arguments must be absent, so # check which values are empty and don't include in the **kw # dictionary params_map = { @@ -260,8 +266,14 @@ def main(): "login_password":"password", "port":"port" } - kw = dict( (params_map[k], v) for (k, v) in module.params.iteritems() + kw = dict( (params_map[k], v) 
for (k, v) in module.params.iteritems() if k in params_map and v != '' ) + + # If a login_unix_socket is specified, incorporate it here. + is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost" + if is_localhost and module.params["login_unix_socket"] != "": + kw["host"] = module.params["login_unix_socket"] + try: db_connection = psycopg2.connect(database="template1", **kw) # Enable autocommit so we can create databases From 3a3ff1f0e46b68b67f5daa8b981411410350dd0f Mon Sep 17 00:00:00 2001 From: Dan Date: Mon, 29 Sep 2014 16:06:28 -0700 Subject: [PATCH 121/250] Adds a unix_socket/login_unix_socket option to the postgresql_user module. --- database/postgresql/postgresql_user.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/database/postgresql/postgresql_user.py b/database/postgresql/postgresql_user.py index 00001bdcd7b..3581d2ea45c 100644 --- a/database/postgresql/postgresql_user.py +++ b/database/postgresql/postgresql_user.py @@ -78,6 +78,11 @@ options: - Host running PostgreSQL. required: false default: localhost + login_unix_socket: + description + - Path to a Unix domain socket for local connections + required: false + default: null priv: description: - "PostgreSQL privileges string in the format: C(table:priv1,priv2)" @@ -456,6 +461,7 @@ def main(): login_user=dict(default="postgres"), login_password=dict(default=""), login_host=dict(default=""), + login_unix_socket=dict(default=""), user=dict(required=True, aliases=['name']), password=dict(default=None), state=dict(default="present", choices=["absent", "present"]), @@ -504,6 +510,12 @@ def main(): } kw = dict( (params_map[k], v) for (k, v) in module.params.iteritems() if k in params_map and v != "" ) + + # If a login_unix_socket is specified, incorporate it here. 
+ is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost" + if is_localhost and module.params["login_unix_socket"] != "": + kw["host"] = module.params["login_unix_socket"] + try: db_connection = psycopg2.connect(**kw) cursor = db_connection.cursor() From 084ccf5a6460569780cfd3c33073e78920559ae9 Mon Sep 17 00:00:00 2001 From: Dan Date: Mon, 29 Sep 2014 16:06:42 -0700 Subject: [PATCH 122/250] Adds a login_unix_socket option to the postgresql_privs module. --- database/postgresql/postgresql_privs.py | 39 ++++++++++++++++--------- 1 file changed, 26 insertions(+), 13 deletions(-) diff --git a/database/postgresql/postgresql_privs.py b/database/postgresql/postgresql_privs.py index febdc9edf17..e618beefcc9 100644 --- a/database/postgresql/postgresql_privs.py +++ b/database/postgresql/postgresql_privs.py @@ -29,7 +29,7 @@ description: options: database: description: - - Name of database to connect to. + - Name of database to connect to. - 'Alias: I(db)' required: yes state: @@ -53,7 +53,7 @@ options: schema, language, tablespace, group] objs: description: - - Comma separated list of database objects to set privileges on. + - Comma separated list of database objects to set privileges on. - If I(type) is C(table) or C(sequence), the special value C(ALL_IN_SCHEMA) can be provided instead to specify all database objects of type I(type) in the schema specified via I(schema). (This @@ -99,6 +99,12 @@ options: - Database port to connect to. required: no default: 5432 + unix_socket: + description + - Path to a Unix domain socket for local connections. + - 'Alias: I(login_unix_socket)' + required: false + default: null login: description: - The username to authenticate with. 
@@ -135,7 +141,7 @@ author: Bernhard Weitzhofer EXAMPLES = """ # On database "library": -# GRANT SELECT, INSERT, UPDATE ON TABLE public.books, public.authors +# GRANT SELECT, INSERT, UPDATE ON TABLE public.books, public.authors # TO librarian, reader WITH GRANT OPTION - postgresql_privs: > database=library @@ -155,8 +161,8 @@ EXAMPLES = """ roles=librarian,reader grant_option=yes -# REVOKE GRANT OPTION FOR INSERT ON TABLE books FROM reader -# Note that role "reader" will be *granted* INSERT privilege itself if this +# REVOKE GRANT OPTION FOR INSERT ON TABLE books FROM reader +# Note that role "reader" will be *granted* INSERT privilege itself if this # isn't already the case (since state=present). - postgresql_privs: > db=library @@ -214,7 +220,7 @@ EXAMPLES = """ role=librarian # GRANT ALL PRIVILEGES ON DATABASE library TO librarian -# If objs is omitted for type "database", it defaults to the database +# If objs is omitted for type "database", it defaults to the database # to which the connection is established - postgresql_privs: > db=library @@ -267,6 +273,12 @@ class Connection(object): } kw = dict( (params_map[k], getattr(params, k)) for k in params_map if getattr(params, k) != '' ) + + # If a unix_socket is specified, incorporate it here. 
+ is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost" + if is_localhost and params.unix_socket != "": + kw["host"] = params.unix_socket + self.connection = psycopg2.connect(**kw) self.cursor = self.connection.cursor() @@ -389,9 +401,9 @@ class Connection(object): def get_group_memberships(self, groups): query = """SELECT roleid, grantor, member, admin_option - FROM pg_catalog.pg_auth_members am + FROM pg_catalog.pg_auth_members am JOIN pg_catalog.pg_roles r ON r.oid = am.roleid - WHERE r.rolname = ANY(%s) + WHERE r.rolname = ANY(%s) ORDER BY roleid, grantor, member""" self.cursor.execute(query, (groups,)) return self.cursor.fetchall() @@ -405,14 +417,14 @@ class Connection(object): :param obj_type: Type of database object to grant/revoke privileges for. - :param privs: Either a list of privileges to grant/revoke + :param privs: Either a list of privileges to grant/revoke or None if type is "group". :param objs: List of database objects to grant/revoke privileges for. :param roles: Either a list of role names or "PUBLIC" for the implicitly defined "PUBLIC" group :param state: "present" to grant privileges, "absent" to revoke. - :param grant_option: Only for state "present": If True, set + :param grant_option: Only for state "present": If True, set grant/admin option. If False, revoke it. If None, don't change grant option. :param schema_qualifier: Some object types ("TABLE", "SEQUENCE", @@ -481,7 +493,7 @@ class Connection(object): else: query = 'GRANT %s TO %s WITH GRANT OPTION' else: - query = 'GRANT %s TO %s' + query = 'GRANT %s TO %s' self.cursor.execute(query % (set_what, for_whom)) # Only revoke GRANT/ADMIN OPTION if grant_option actually is False. 
@@ -492,7 +504,7 @@ class Connection(object): query = 'REVOKE GRANT OPTION FOR %s FROM %s' self.cursor.execute(query % (set_what, for_whom)) else: - query = 'REVOKE %s FROM %s' + query = 'REVOKE %s FROM %s' self.cursor.execute(query % (set_what, for_whom)) status_after = get_status(objs) return status_before != status_after @@ -516,10 +528,11 @@ def main(): objs=dict(required=False, aliases=['obj']), schema=dict(required=False), roles=dict(required=True, aliases=['role']), - grant_option=dict(required=False, type='bool', + grant_option=dict(required=False, type='bool', aliases=['admin_option']), host=dict(default='', aliases=['login_host']), port=dict(type='int', default=5432), + unix_socket=dict(default='', aliases=['login_unix_socket']), login=dict(default='postgres', aliases=['login_user']), password=dict(default='', aliases=['login_password']) ), From 3a80b734e6e4c1ebe8cbd40b4957a7589520caf5 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 1 Dec 2014 10:38:47 -0800 Subject: [PATCH 123/250] Escape % in db+table names before adding to a format string being passed into db.execute() Fixes #416 --- database/mysql/mysql_user.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/database/mysql/mysql_user.py b/database/mysql/mysql_user.py index e8461a05851..1fc57dc9534 100644 --- a/database/mysql/mysql_user.py +++ b/database/mysql/mysql_user.py @@ -184,7 +184,7 @@ def user_mod(cursor, user, host, password, new_priv, append_privs): changed = False grant_option = False - # Handle passwords. + # Handle passwords if password is not None: cursor.execute("SELECT password FROM user WHERE user = %s AND host = %s", (user,host)) current_pass_hash = cursor.fetchone() @@ -194,7 +194,7 @@ def user_mod(cursor, user, host, password, new_priv, append_privs): cursor.execute("SET PASSWORD FOR %s@%s = PASSWORD(%s)", (user,host,password)) changed = True - # Handle privileges. 
+ # Handle privileges if new_priv is not None: curr_priv = privileges_get(cursor, user,host) @@ -297,6 +297,8 @@ def privileges_unpack(priv): return output def privileges_revoke(cursor, user,host,db_table,grant_option): + # Escape '%' since mysql db.execute() uses a format string + db_table = db_table.replace('%', '%%') if grant_option: query = ["REVOKE GRANT OPTION ON %s" % mysql_quote_identifier(db_table, 'table')] query.append("FROM %s@%s") @@ -308,7 +310,9 @@ def privileges_revoke(cursor, user,host,db_table,grant_option): cursor.execute(query, (user, host)) def privileges_grant(cursor, user,host,db_table,priv): - + # Escape '%' since mysql db.execute uses a format string and the + # specification of db and table often use a % (SQL wildcard) + db_table = db_table.replace('%', '%%') priv_string = ",".join(filter(lambda x: x != 'GRANT', priv)) query = ["GRANT %s ON %s" % (priv_string, mysql_quote_identifier(db_table, 'table'))] query.append("TO %s@%s") From b37a2328c0cba611c73ad5d1094a7ec4d1cfcdf1 Mon Sep 17 00:00:00 2001 From: Philip Misiowiec Date: Fri, 26 Sep 2014 19:29:08 -0700 Subject: [PATCH 124/250] Ability to specify new SSD EBS option --- cloud/amazon/ec2_vol.py | 22 +++++++++++++++++++--- 1 file changed, 19 insertions(+), 3 deletions(-) diff --git a/cloud/amazon/ec2_vol.py b/cloud/amazon/ec2_vol.py index 0e662a77bdd..52a2ed8e9ae 100644 --- a/cloud/amazon/ec2_vol.py +++ b/cloud/amazon/ec2_vol.py @@ -48,6 +48,13 @@ options: required: false default: null aliases: [] + volume_type: + description: + - Type of EBS volume; standard (magnetic), gp2 (SSD), io1 (Provisioned IOPS). "Standard" is the old EBS default + and continues to remain the Ansible default for backwards compatibility. + required: false + default: standard + aliases: [] iops: description: - the provisioned IOPs you want to associate with this volume (integer). 
@@ -173,6 +180,14 @@ EXAMPLES = ''' module: ec2_vol instance: i-XXXXXX state: list + +# Create new volume using SSD storage +- local_action: + module: ec2_vol + instance: XXXXXX + volume_size: 50 + volume_type: gp2 + device_name: /dev/xvdf ''' # Note: this module needs to be made idempotent. Possible solution is to use resource tags with the volumes. @@ -253,12 +268,11 @@ def create_volume(module, ec2, zone): iops = module.params.get('iops') encrypted = module.params.get('encrypted') volume_size = module.params.get('volume_size') + volume_type = module.params.get('volume_type') snapshot = module.params.get('snapshot') # If custom iops is defined we use volume_type "io1" rather than the default of "standard" if iops: volume_type = 'io1' - else: - volume_type = 'standard' # If no instance supplied, try volume creation based on module parameters. if name or id: @@ -338,6 +352,7 @@ def main(): id = dict(), name = dict(), volume_size = dict(), + volume_type = dict(choices=['standard', 'gp2', 'io1'], default='standard'), iops = dict(), encrypted = dict(), device_name = dict(), @@ -352,6 +367,7 @@ def main(): name = module.params.get('name') instance = module.params.get('instance') volume_size = module.params.get('volume_size') + volume_type = module.params.get('volume_type') iops = module.params.get('iops') encrypted = module.params.get('encrypted') device_name = module.params.get('device_name') @@ -425,7 +441,7 @@ def main(): volume = create_volume(module, ec2, zone) if instance: attach_volume(module, ec2, volume, inst) - module.exit_json(volume_id=volume.id, device=device_name) + module.exit_json(volume_id=volume.id, device=device_name, volume_type=volume.type) # import module snippets from ansible.module_utils.basic import * From c241e9dd9c7175f217dcb1db9712fb9a291de0d9 Mon Sep 17 00:00:00 2001 From: Philip Misiowiec Date: Mon, 29 Sep 2014 17:49:27 -0500 Subject: [PATCH 125/250] added version --- cloud/amazon/ec2_vol.py | 1 + 1 file changed, 1 insertion(+) diff --git 
a/cloud/amazon/ec2_vol.py b/cloud/amazon/ec2_vol.py index 52a2ed8e9ae..56a2c6fc60f 100644 --- a/cloud/amazon/ec2_vol.py +++ b/cloud/amazon/ec2_vol.py @@ -55,6 +55,7 @@ options: required: false default: standard aliases: [] + version_added: "1.8" iops: description: - the provisioned IOPs you want to associate with this volume (integer). From f5789e8eda59a475e81a8e5a2494363c94c8db1f Mon Sep 17 00:00:00 2001 From: Philip Misiowiec Date: Fri, 26 Sep 2014 23:03:22 -0700 Subject: [PATCH 126/250] Support for EC2 dedicated tenancy option --- cloud/amazon/ec2.py | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index 04e419ea1f1..8d25c3196a8 100644 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -67,6 +67,12 @@ options: required: true default: null aliases: [] + tenancy: + description: + - An instance with a tenancy of "dedicated" runs on single-tenant hardware and can only be launched into a VPC. Valid values are:"default" or "dedicated". NOTE: To use dedicated tenancy you MUST specify a vpc_subnet_id as well. Dedicated tenancy is not available for EC2 "micro" instances. 
+ required: false + default: default + aliases: [] spot_price: version_added: "1.5" description: @@ -312,6 +318,18 @@ local_action: vpc_subnet_id: subnet-29e63245 assign_public_ip: yes +# Dedicated tenancy example +- local_action: + module: ec2 + assign_public_ip: yes + group_id: sg-1dc53f72 + key_name: mykey + image: ami-6e649707 + instance_type: m1.small + tenancy: dedicated + vpc_subnet_id: subnet-29e63245 + wait: yes + # Spot instance example - local_action: module: ec2 @@ -728,6 +746,7 @@ def create_instances(module, ec2, override_count=None): group_id = module.params.get('group_id') zone = module.params.get('zone') instance_type = module.params.get('instance_type') + tenancy = module.params.get('tenancy') spot_price = module.params.get('spot_price') image = module.params.get('image') if override_count: @@ -811,6 +830,9 @@ def create_instances(module, ec2, override_count=None): if ebs_optimized: params['ebs_optimized'] = ebs_optimized + + if tenancy: + params['tenancy'] = tenancy if boto_supports_profile_name_arg(ec2): params['instance_profile_name'] = instance_profile_name @@ -1153,6 +1175,7 @@ def main(): count_tag = dict(), volumes = dict(type='list'), ebs_optimized = dict(type='bool', default=False), + tenancy = dict(default='default'), ) ) From 59701feadba064e5880a6b31ec3c019604118976 Mon Sep 17 00:00:00 2001 From: Philip Misiowiec Date: Mon, 29 Sep 2014 17:48:12 -0500 Subject: [PATCH 127/250] added version --- cloud/amazon/ec2.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index 8d25c3196a8..050ed0b63f4 100644 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -68,6 +68,7 @@ options: default: null aliases: [] tenancy: + version_added: "1.8" description: - An instance with a tenancy of "dedicated" runs on single-tenant hardware and can only be launched into a VPC. Valid values are:"default" or "dedicated". NOTE: To use dedicated tenancy you MUST specify a vpc_subnet_id as well. 
Dedicated tenancy is not available for EC2 "micro" instances. required: false From c6b0d469acbb1a1b0508bacaedc5456eb5e9be83 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Mon, 1 Dec 2014 14:46:07 -0500 Subject: [PATCH 128/250] Misc EC2 doc tweaks. --- cloud/amazon/cloudformation.py | 17 +++-- cloud/amazon/ec2.py | 128 ++++++++++++++++----------------- 2 files changed, 70 insertions(+), 75 deletions(-) diff --git a/cloud/amazon/cloudformation.py b/cloud/amazon/cloudformation.py index 162c8d8cd16..4048a9922bf 100644 --- a/cloud/amazon/cloudformation.py +++ b/cloud/amazon/cloudformation.py @@ -20,7 +20,6 @@ module: cloudformation short_description: create a AWS CloudFormation stack description: - Launches an AWS CloudFormation stack and waits for it complete. -version_added: "1.1" options: stack_name: description: @@ -98,17 +97,17 @@ EXAMPLES = ''' tasks: - name: launch ansible cloudformation example cloudformation: - stack_name="ansible-cloudformation" state=present - region=us-east-1 disable_rollback=true - template=files/cloudformation-example.json - args: + stack_name: "ansible-cloudformation" + state: "present" + region: "us-east-1 disable_rollback=true" + template: "files/cloudformation-example.json" template_parameters: - KeyName: jmartin - DiskType: ephemeral - InstanceType: m1.small + KeyName: "jmartin" + DiskType: "ephemeral" + InstanceType: "m1.small" ClusterSize: 3 tags: - Stack: ansible-cloudformation + Stack: "ansible-cloudformation" ''' import json diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index 04e419ea1f1..87dc80b5e36 100644 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -17,10 +17,9 @@ DOCUMENTATION = ''' --- module: ec2 -short_description: create, terminate, start or stop an instance in ec2, return instanceid +short_description: create, terminate, start or stop an instance in ec2 description: - - Creates or terminates ec2 instances. When created optionally waits for it to be 'running'. 
This module has a dependency on python-boto >= 2.5 -version_added: "0.9" + - Creates or terminates ec2 instances. options: key_name: description: @@ -28,12 +27,6 @@ options: required: false default: null aliases: ['keypair'] - id: - description: - - identifier for this instance or set of instances, so that the module will be idempotent with respect to EC2 instances. This identifier is valid for at least 24 hours after the termination of the instance, and should not be reused for another call later on. For details, see the description of client token at U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html). - required: false - default: null - aliases: [] group: description: - security group (or list of groups) to use with the instance @@ -76,7 +69,7 @@ options: aliases: [] image: description: - - I(emi) (or I(ami)) to use for the instance + - I(ami) ID to use for the instance required: true default: null aliases: [] @@ -94,7 +87,7 @@ options: aliases: [] wait: description: - - wait for the instance to be in state 'running' before returning + - wait for the instance to be 'running' before returning. Does not wait for SSH, see 'wait_for' example for details. required: false default: "no" choices: [ "yes", "no" ] @@ -226,54 +219,55 @@ extends_documentation_fragment: aws ''' EXAMPLES = ''' -# Note: None of these examples set aws_access_key, aws_secret_key, or region. -# It is assumed that their matching environment variables are set. +# Note: These examples do not set authentication details, see the AWS Guide for details. 
# Basic provisioning example -- local_action: - module: ec2 +- ec2: key_name: mykey - instance_type: c1.medium - image: emi-40603AD1 + instance_type: t2.micro + image: ami-123456 wait: yes group: webserver count: 3 + vpc_subnet_id: subnet-29e63245 + assign_public_ip: yes # Advanced example with tagging and CloudWatch -- local_action: - module: ec2 +- ec2: key_name: mykey group: databases - instance_type: m1.large - image: ami-6e649707 + instance_type: t2.micro + image: ami-123456 wait: yes wait_timeout: 500 count: 5 instance_tags: db: postgres monitoring: yes + vpc_subnet_id: subnet-29e63245 + assign_public_ip: yes # Single instance with additional IOPS volume from snapshot and volume delete on termination -local_action: - module: ec2 +- ec2: key_name: mykey group: webserver - instance_type: m1.large - image: ami-6e649707 + instance_type: c3.medium + image: ami-123456 wait: yes wait_timeout: 500 volumes: - - device_name: /dev/sdb - snapshot: snap-abcdef12 - device_type: io1 - iops: 1000 - volume_size: 100 - delete_on_termination: true + - device_name: /dev/sdb + snapshot: snap-abcdef12 + device_type: io1 + iops: 1000 + volume_size: 100 + delete_on_termination: true monitoring: yes + vpc_subnet_id: subnet-29e63245 + assign_public_ip: yes # Multiple groups example -local_action: - module: ec2 +- ec2: key_name: mykey group: ['databases', 'internal-services', 'sshable', 'and-so-forth'] instance_type: m1.large @@ -284,10 +278,11 @@ local_action: instance_tags: db: postgres monitoring: yes + vpc_subnet_id: subnet-29e63245 + assign_public_ip: yes # Multiple instances with additional volume from snapshot -local_action: - module: ec2 +- ec2: key_name: mykey group: webserver instance_type: m1.large @@ -300,21 +295,11 @@ local_action: snapshot: snap-abcdef12 volume_size: 10 monitoring: yes - -# VPC example -- local_action: - module: ec2 - key_name: mykey - group_id: sg-1dc53f72 - instance_type: m1.small - image: ami-6e649707 - wait: yes vpc_subnet_id: subnet-29e63245 
assign_public_ip: yes # Spot instance example -- local_action: - module: ec2 +- ec2: spot_price: 0.24 spot_wait_timeout: 600 keypair: mykey @@ -328,7 +313,6 @@ local_action: # Launch instances, runs some tasks # and then terminate them - - name: Create a sandbox instance hosts: localhost gather_facts: False @@ -340,13 +324,21 @@ local_action: region: us-east-1 tasks: - name: Launch instance - local_action: ec2 key_name={{ keypair }} group={{ security_group }} instance_type={{ instance_type }} image={{ image }} wait=true region={{ region }} + ec2: + key_name: "{{ keypair }}" + group: "{{ security_group }}" + instance_type: "{{ instance_type }}" + image: "{{ image }}" + wait: true + region: "{{ region }}" + vpc_subnet_id: subnet-29e63245 + assign_public_ip: yes register: ec2 - name: Add new instance to host group - local_action: add_host hostname={{ item.public_ip }} groupname=launched + add_host: hostname={{ item.public_ip }} groupname=launched with_items: ec2.instances - name: Wait for SSH to come up - local_action: wait_for host={{ item.public_dns_name }} port=22 delay=60 timeout=320 state=started + wait_for: host={{ item.public_dns_name }} port=22 delay=60 timeout=320 state=started with_items: ec2.instances - name: Configure instance(s) @@ -362,8 +354,7 @@ local_action: connection: local tasks: - name: Terminate instances that were previously launched - local_action: - module: ec2 + ec2: state: 'absent' instance_ids: '{{ ec2.instance_ids }}' @@ -382,12 +373,13 @@ local_action: region: us-east-1 tasks: - name: Start the sandbox instances - local_action: - module: ec2 + ec2: instance_ids: '{{ instance_ids }}' region: '{{ region }}' state: running wait: True + vpc_subnet_id: subnet-29e63245 + assign_public_ip: yes role: - do_neat_stuff - do_more_neat_stuff @@ -404,38 +396,40 @@ local_action: region: us-east-1 tasks: - name: Stop the sandbox instances - local_action: - module: ec2 - instance_ids: '{{ instance_ids }}' - region: '{{ region }}' - state: stopped - wait: 
True + ec2: + instance_ids: '{{ instance_ids }}' + region: '{{ region }}' + state: stopped + wait: True + vpc_subnet_id: subnet-29e63245 + assign_public_ip: yes # # Enforce that 5 instances with a tag "foo" are running +# (Highly recommended!) # -- local_action: - module: ec2 +- ec2: key_name: mykey instance_type: c1.medium - image: emi-40603AD1 + image: ami-40603AD1 wait: yes group: webserver instance_tags: foo: bar exact_count: 5 count_tag: foo + vpc_subnet_id: subnet-29e63245 + assign_public_ip: yes # # Enforce that 5 running instances named "database" with a "dbtype" of "postgres" # -- local_action: - module: ec2 +- ec2: key_name: mykey instance_type: c1.medium - image: emi-40603AD1 + image: ami-40603AD1 wait: yes group: webserver instance_tags: @@ -445,6 +439,8 @@ local_action: count_tag: Name: database dbtype: postgres + vpc_subnet_id: subnet-29e63245 + assign_public_ip: yes # # count_tag complex argument examples From e7c92a6bc6e95ae3333bdb364ef904f19cf43c00 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Mon, 1 Dec 2014 15:14:57 -0500 Subject: [PATCH 129/250] More EC2 doc tweaks --- cloud/amazon/ec2_ami.py | 22 ++++------- cloud/amazon/ec2_ami_search.py | 3 +- cloud/amazon/ec2_asg.py | 13 ++++--- cloud/amazon/ec2_eip.py | 4 +- cloud/amazon/ec2_elb.py | 10 ++--- cloud/amazon/ec2_elb_lb.py | 5 ++- cloud/amazon/ec2_group.py | 3 +- cloud/amazon/ec2_key.py | 14 +++---- cloud/amazon/ec2_lc.py | 2 +- cloud/amazon/ec2_snapshot.py | 9 ++--- cloud/amazon/ec2_tag.py | 6 +-- cloud/amazon/ec2_vol.py | 32 +++++----------- cloud/amazon/ec2_vpc.py | 9 ++--- cloud/amazon/elasticache.py | 9 ++--- cloud/amazon/rds.py | 57 +++++++++++++-------------- cloud/amazon/rds_param_group.py | 19 ++++----- cloud/amazon/rds_subnet_group.py | 9 ++--- cloud/amazon/route53.py | 66 ++++++++++++++++---------------- cloud/amazon/s3.py | 23 ++++------- 19 files changed, 140 insertions(+), 175 deletions(-) diff --git a/cloud/amazon/ec2_ami.py b/cloud/amazon/ec2_ami.py index 
3baf70a438f..c50e5a88048 100644 --- a/cloud/amazon/ec2_ami.py +++ b/cloud/amazon/ec2_ami.py @@ -18,9 +18,9 @@ DOCUMENTATION = ''' --- module: ec2_ami version_added: "1.3" -short_description: create or destroy an image in ec2, return imageid +short_description: create or destroy an image in ec2 description: - - Creates or deletes ec2 images. This module has a dependency on python-boto >= 2.5 + - Creates or deletes ec2 images. options: instance_id: description: @@ -89,13 +89,10 @@ extends_documentation_fragment: aws ''' # Thank you to iAcquire for sponsoring development of this module. -# -# See http://alestic.com/2011/06/ec2-ami-security for more information about ensuring the security of your AMI. EXAMPLES = ''' # Basic AMI Creation -- local_action: - module: ec2_ami +- ec2_ami: aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx instance_id: i-xxxxxx @@ -104,8 +101,7 @@ EXAMPLES = ''' register: instance # Basic AMI Creation, without waiting -- local_action: - module: ec2_ami +- ec2_ami: aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx region: xxxxxx @@ -115,22 +111,20 @@ EXAMPLES = ''' register: instance # Deregister/Delete AMI -- local_action: - module: ec2_ami +- ec2_ami: aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx region: xxxxxx - image_id: ${instance.image_id} + image_id: "{{ instance.image_id }}" delete_snapshot: True state: absent # Deregister AMI -- local_action: - module: ec2_ami +- ec2_ami: aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx region: xxxxxx - image_id: ${instance.image_id} + image_id: "{{ instance.image_id }}" delete_snapshot: False state: absent diff --git a/cloud/amazon/ec2_ami_search.py b/cloud/amazon/ec2_ami_search.py index 25875de39bd..70664cf5f8d 100644 --- a/cloud/amazon/ec2_ami_search.py +++ b/cloud/amazon/ec2_ami_search.py @@ 
-16,10 +16,11 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . + DOCUMENTATION = ''' --- module: ec2_ami_search -short_description: Retrieve AWS AMI for a given operating system. +short_description: Retrieve AWS AMI information for a given operating system. version_added: "1.6" description: - Look up the most recent AMI on AWS for a given operating system. diff --git a/cloud/amazon/ec2_asg.py b/cloud/amazon/ec2_asg.py index 2b060ccca37..da922530770 100644 --- a/cloud/amazon/ec2_asg.py +++ b/cloud/amazon/ec2_asg.py @@ -119,21 +119,23 @@ extends_documentation_fragment: aws """ EXAMPLES = ''' -A basic example of configuration: +# Basic configuration - ec2_asg: name: special - load_balancers: 'lb1,lb2' - availability_zones: 'eu-west-1a,eu-west-1b' + load_balancers: [ 'lb1', 'lb2' ] + availability_zones: [ 'eu-west-1a', 'eu-west-1b' ] launch_config_name: 'lc-1' min_size: 1 max_size: 10 desired_capacity: 5 - vpc_zone_identifier: 'subnet-abcd1234,subnet-1a2b3c4d' + vpc_zone_identifier: [ 'subnet-abcd1234', 'subnet-1a2b3c4d' ] tags: - environment: production propagate_at_launch: no +# Rolling ASG Updates + Below is an example of how to assign a new launch config to an ASG and terminate old instances. 
All instances in "myasg" that do not have the launch configuration named "my_new_lc" will be terminated in @@ -415,9 +417,10 @@ def replace(connection, module): max_size = module.params.get('max_size') min_size = module.params.get('min_size') desired_capacity = module.params.get('desired_capacity') + + # FIXME: we need some more docs about this feature replace_instances = module.params.get('replace_instances') - # wait for instance list to be populated on a newly provisioned ASG instance_wait = time.time() + 30 while instance_wait > time.time(): diff --git a/cloud/amazon/ec2_eip.py b/cloud/amazon/ec2_eip.py index cff83e482b3..fd0e8d04568 100644 --- a/cloud/amazon/ec2_eip.py +++ b/cloud/amazon/ec2_eip.py @@ -69,13 +69,13 @@ EXAMPLES = ''' ec2_eip: instance_id=i-1212f003 - name: allocate a new elastic IP without associating it to anything - ec2_eip: + action: ec2_eip register: eip - name: output the IP debug: msg="Allocated IP is {{ eip.public_ip }}" - name: provision new instances with ec2 - ec2: keypair=mykey instance_type=c1.medium image=emi-40603AD1 wait=yes group=webserver count=3 + ec2: keypair=mykey instance_type=c1.medium image=ami-40603AD1 wait=yes group=webserver count=3 register: ec2 - name: associate new elastic IPs with each of the instances ec2_eip: "instance_id={{ item }}" diff --git a/cloud/amazon/ec2_elb.py b/cloud/amazon/ec2_elb.py index 42cb1819025..52f8c4a4bf9 100644 --- a/cloud/amazon/ec2_elb.py +++ b/cloud/amazon/ec2_elb.py @@ -80,18 +80,18 @@ EXAMPLES = """ # basic pre_task and post_task example pre_tasks: - name: Gathering ec2 facts - ec2_facts: + action: ec2_facts - name: Instance De-register - local_action: ec2_elb - args: + local_action: + module: ec2_elb instance_id: "{{ ansible_ec2_instance_id }}" state: 'absent' roles: - myrole post_tasks: - name: Instance Register - local_action: ec2_elb - args: + local_action: + module: ec2_elb instance_id: "{{ ansible_ec2_instance_id }}" ec2_elbs: "{{ item }}" state: 'present' diff --git 
a/cloud/amazon/ec2_elb_lb.py b/cloud/amazon/ec2_elb_lb.py index 462fbbcc797..6235770eb38 100644 --- a/cloud/amazon/ec2_elb_lb.py +++ b/cloud/amazon/ec2_elb_lb.py @@ -115,7 +115,8 @@ EXAMPLES = """ # Note: None of these examples set aws_access_key, aws_secret_key, or region. # It is assumed that their matching environment variables are set. -# Basic provisioning example +# Basic provisioning example (non-VPC) + - local_action: module: ec2_elb_lb name: "test-please-delete" @@ -134,8 +135,8 @@ EXAMPLES = """ # ssl certificate required for https or ssl ssl_certificate_id: "arn:aws:iam::123456789012:server-certificate/company/servercerts/ProdServerCert" +# Internal ELB example -# Basic VPC provisioning example - local_action: module: ec2_elb_lb name: "test-vpc" diff --git a/cloud/amazon/ec2_group.py b/cloud/amazon/ec2_group.py index 822147468a2..7d081a29620 100644 --- a/cloud/amazon/ec2_group.py +++ b/cloud/amazon/ec2_group.py @@ -70,8 +70,7 @@ notes: EXAMPLES = ''' - name: example ec2 group - local_action: - module: ec2_group + ec2_group: name: example description: an example EC2 group vpc_id: 12345 diff --git a/cloud/amazon/ec2_key.py b/cloud/amazon/ec2_key.py index 9c8274f764a..9f548496c4a 100644 --- a/cloud/amazon/ec2_key.py +++ b/cloud/amazon/ec2_key.py @@ -56,15 +56,13 @@ EXAMPLES = ''' # Creates a new ec2 key pair named `example` if not present, returns generated # private key - name: example ec2 key - local_action: - module: ec2_key + ec2_key: name: example # Creates a new ec2 key pair named `example` if not present using provided key -# material +# material. This could use the 'file' lookup plugin to pull this off disk. 
- name: example2 ec2 key - local_action: - module: ec2_key + ec2_key: name: example2 key_material: 'ssh-rsa AAAAxyz...== me@example.com' state: present @@ -72,16 +70,14 @@ EXAMPLES = ''' # Creates a new ec2 key pair named `example` if not present using provided key # material - name: example3 ec2 key - local_action: - module: ec2_key + ec2_key: name: example3 key_material: "{{ item }}" with_file: /path/to/public_key.id_rsa.pub # Removes ec2 key pair by name - name: remove example key - local_action: - module: ec2_key + ec2_key: name: example state: absent ''' diff --git a/cloud/amazon/ec2_lc.py b/cloud/amazon/ec2_lc.py index f75dfe6d938..8fbdcea3e66 100644 --- a/cloud/amazon/ec2_lc.py +++ b/cloud/amazon/ec2_lc.py @@ -125,7 +125,7 @@ EXAMPLES = ''' name: special image_id: ami-XXX key_name: default - security_groups: 'group,group2' + security_groups: ['group', 'group2' ] instance_type: t1.micro ''' diff --git a/cloud/amazon/ec2_snapshot.py b/cloud/amazon/ec2_snapshot.py index 4c21ae6ff7b..eb77840e638 100644 --- a/cloud/amazon/ec2_snapshot.py +++ b/cloud/amazon/ec2_snapshot.py @@ -68,21 +68,18 @@ extends_documentation_fragment: aws EXAMPLES = ''' # Simple snapshot of volume using volume_id -- local_action: - module: ec2_snapshot +- ec2_snapshot: volume_id: vol-abcdef12 description: snapshot of /data from DB123 taken 2013/11/28 12:18:32 # Snapshot of volume mounted on device_name attached to instance_id -- local_action: - module: ec2_snapshot +- ec2_snapshot: instance_id: i-12345678 device_name: /dev/sdb1 description: snapshot of /data from DB123 taken 2013/11/28 12:18:32 # Snapshot of volume with tagging -- local_action: - module: ec2_snapshot +- ec2_snapshot: instance_id: i-12345678 device_name: /dev/sdb1 snapshot_tags: diff --git a/cloud/amazon/ec2_tag.py b/cloud/amazon/ec2_tag.py index 4a33112189a..ab4b87bfa84 100644 --- a/cloud/amazon/ec2_tag.py +++ b/cloud/amazon/ec2_tag.py @@ -50,7 +50,7 @@ EXAMPLES = ''' # Basic example of adding tag(s) tasks: - name: tag a 
resource - local_action: ec2_tag resource=vol-XXXXXX region=eu-west-1 state=present + ec2_tag: resource=vol-XXXXXX region=eu-west-1 state=present args: tags: Name: ubervol @@ -59,11 +59,11 @@ tasks: # Playbook example of adding tag(s) to spawned instances tasks: - name: launch some instances - local_action: ec2 keypair={{ keypair }} group={{ security_group }} instance_type={{ instance_type }} image={{ image_id }} wait=true region=eu-west-1 + ec2: keypair={{ keypair }} group={{ security_group }} instance_type={{ instance_type }} image={{ image_id }} wait=true region=eu-west-1 register: ec2 - name: tag my launched instances - local_action: ec2_tag resource={{ item.id }} region=eu-west-1 state=present + ec2_tag: resource={{ item.id }} region=eu-west-1 state=present with_items: ec2.instances args: tags: diff --git a/cloud/amazon/ec2_vol.py b/cloud/amazon/ec2_vol.py index 0e662a77bdd..f9523ba3ecc 100644 --- a/cloud/amazon/ec2_vol.py +++ b/cloud/amazon/ec2_vol.py @@ -105,36 +105,31 @@ extends_documentation_fragment: aws EXAMPLES = ''' # Simple attachment action -- local_action: - module: ec2_vol +- ec2_vol: instance: XXXXXX volume_size: 5 device_name: sdd # Example using custom iops params -- local_action: - module: ec2_vol +- ec2_vol: instance: XXXXXX volume_size: 5 iops: 200 device_name: sdd # Example using snapshot id -- local_action: - module: ec2_vol +- ec2_vol: instance: XXXXXX snapshot: "{{ snapshot }}" # Playbook example combined with instance launch -- local_action: - module: ec2 +- ec2: keypair: "{{ keypair }}" image: "{{ image }}" wait: yes count: 3 register: ec2 -- local_action: - module: ec2_vol +- ec2_vol: instance: "{{ item.id }} " volume_size: 5 with_items: ec2.instances @@ -144,8 +139,7 @@ EXAMPLES = ''' # * Nothing will happen if the volume is already attached. # * Volume must exist in the same zone. 
-- local_action: - module: ec2 +- ec2: keypair: "{{ keypair }}" image: "{{ image }}" zone: YYYYYY @@ -154,8 +148,7 @@ EXAMPLES = ''' count: 1 register: ec2 -- local_action: - module: ec2_vol +- ec2_vol: instance: "{{ item.id }}" name: my_existing_volume_Name_tag device_name: /dev/xvdf @@ -163,23 +156,16 @@ EXAMPLES = ''' register: ec2_vol # Remove a volume -- local_action: - module: ec2_vol +- ec2_vol: id: vol-XXXXXXXX state: absent # List volumes for an instance -- local_action: - module: ec2_vol +- ec2_vol: instance: i-XXXXXX state: list ''' -# Note: this module needs to be made idempotent. Possible solution is to use resource tags with the volumes. -# if state=present and it doesn't exist, create, tag and attach. -# Check for state by looking for volume attachment with tag (and against block device mapping?). -# Would personally like to revisit this in May when Eucalyptus also has tagging support (3.3). - import sys import time diff --git a/cloud/amazon/ec2_vpc.py b/cloud/amazon/ec2_vpc.py index e4dc9a65f7d..00528f27849 100644 --- a/cloud/amazon/ec2_vpc.py +++ b/cloud/amazon/ec2_vpc.py @@ -130,16 +130,14 @@ EXAMPLES = ''' # It is assumed that their matching environment variables are set. # Basic creation example: - local_action: - module: ec2_vpc + ec2_vpc: state: present cidr_block: 172.23.0.0/16 resource_tags: { "Environment":"Development" } region: us-west-2 # Full creation example with subnets and optional availability zones. # The absence or presence of subnets deletes or creates them respectively. 
- local_action: - module: ec2_vpc + ec2_vpc: state: present cidr_block: 172.22.0.0/16 resource_tags: { "Environment":"Development" } @@ -170,8 +168,7 @@ EXAMPLES = ''' register: vpc # Removal of a VPC by id - local_action: - module: ec2_vpc + ec2_vpc: state: absent vpc_id: vpc-aaaaaaa region: us-west-2 diff --git a/cloud/amazon/elasticache.py b/cloud/amazon/elasticache.py index 8c82f2fcc20..c1846f525a8 100644 --- a/cloud/amazon/elasticache.py +++ b/cloud/amazon/elasticache.py @@ -111,8 +111,7 @@ EXAMPLES = """ # It is assumed that their matching environment variables are set. # Basic example -- local_action: - module: elasticache +- elasticache: name: "test-please-delete" state: present engine: memcached @@ -126,14 +125,12 @@ EXAMPLES = """ # Ensure cache cluster is gone -- local_action: - module: elasticache +- elasticache: name: "test-please-delete" state: absent # Reboot cache cluster -- local_action: - module: elasticache +- elasticache: name: "test-please-delete" state: rebooted diff --git a/cloud/amazon/rds.py b/cloud/amazon/rds.py index ba3f1e38d39..d6fd1622161 100644 --- a/cloud/amazon/rds.py +++ b/cloud/amazon/rds.py @@ -224,44 +224,45 @@ requirements: [ "boto" ] author: Bruce Pennypacker ''' +# FIXME: the command stuff needs a 'state' like alias to make things consistent -- MPD + EXAMPLES = ''' # Basic mysql provisioning example -- rds: > - command=create - instance_name=new_database - db_engine=MySQL - size=10 - instance_type=db.m1.small - username=mysql_admin - password=1nsecure +- rds: + command: create + instance_name: new_database + db_engine: MySQL + size: 10 + instance_type: db.m1.small + username: mysql_admin + password: 1nsecure # Create a read-only replica and wait for it to become available -- rds: > - command=replicate - instance_name=new_database_replica - source_instance=new_database - wait=yes - wait_timeout=600 +- rds: + command: replicate + instance_name: new_database_replica + source_instance: new_database + wait: yes + wait_timeout: 600 
# Delete an instance, but create a snapshot before doing so -- rds: > - command=delete - instance_name=new_database - snapshot=new_database_snapshot +- rds: + command: delete + instance_name: new_database + snapshot: new_database_snapshot # Get facts about an instance -- rds: > - command=facts - instance_name=new_database - register: new_database_facts +- rds: + command: facts + instance_name: new_database + register: new_database_facts # Rename an instance and wait for the change to take effect -- rds: > - command=modify - instance_name=new_database - new_instance_name=renamed_database - wait=yes - +- rds: + command: modify + instance_name: new_database + new_instance_name: renamed_database + wait: yes ''' import sys diff --git a/cloud/amazon/rds_param_group.py b/cloud/amazon/rds_param_group.py index 39f9432057a..d1559ac78ae 100644 --- a/cloud/amazon/rds_param_group.py +++ b/cloud/amazon/rds_param_group.py @@ -85,17 +85,18 @@ author: Scott Anderson EXAMPLES = ''' # Add or change a parameter group, in this case setting auto_increment_increment to 42 * 1024 -- rds_param_group: > - state=present - name=norwegian_blue - description=My Fancy Ex Parrot Group - engine=mysql5.6 - params='{"auto_increment_increment": "42K"}' +- rds_param_group: + state: present + name: norwegian_blue + description: 'My Fancy Ex Parrot Group' + engine: 'mysql5.6' + params: + auto_increment_increment: "42K" # Remove a parameter group -- rds_param_group: > - state=absent - name=norwegian_blue +- rds_param_group: + state: absent + name: norwegian_blue ''' import sys diff --git a/cloud/amazon/rds_subnet_group.py b/cloud/amazon/rds_subnet_group.py index 552c94f188a..bba6cd86872 100644 --- a/cloud/amazon/rds_subnet_group.py +++ b/cloud/amazon/rds_subnet_group.py @@ -71,8 +71,7 @@ author: Scott Anderson EXAMPLES = ''' # Add or change a subnet group -- local_action: - module: rds_subnet_group +- rds_subnet_group state: present name: norwegian-blue description: My Fancy Ex Parrot Subnet Group @@ 
-81,9 +80,9 @@ EXAMPLES = ''' - subnet-bbbbbbbb # Remove a parameter group -- rds_param_group: > - state=absent - name=norwegian-blue +- rds_param_group: + state: absent + name: norwegian-blue ''' import sys diff --git a/cloud/amazon/route53.py b/cloud/amazon/route53.py index b3878e0580e..9958b221031 100644 --- a/cloud/amazon/route53.py +++ b/cloud/amazon/route53.py @@ -88,51 +88,53 @@ requirements: [ "boto" ] author: Bruce Pennypacker ''' +# FIXME: the command stuff should have a more state like configuration alias -- MPD + EXAMPLES = ''' # Add new.foo.com as an A record with 3 IPs -- route53: > - command=create - zone=foo.com - record=new.foo.com - type=A - ttl=7200 - value=1.1.1.1,2.2.2.2,3.3.3.3 +- route53: + command: create + zone: foo.com + record: new.foo.com + type: A + ttl: 7200 + value: 1.1.1.1,2.2.2.2,3.3.3.3 # Retrieve the details for new.foo.com -- route53: > - command=get - zone=foo.com - record=new.foo.com - type=A +- route53: + command: get + zone: foo.com + record: new.foo.com + type: A register: rec # Delete new.foo.com A record using the results from the get command -- route53: > - command=delete - zone=foo.com - record={{ rec.set.record }} - type={{ rec.set.type }} - value={{ rec.set.value }} +- route53: + command: delete + zone: foo.com + record: "{{ rec.set.record }}" + type: "{{ rec.set.type }}" + value: "{{ rec.set.value }}" # Add an AAAA record. Note that because there are colons in the value # that the entire parameter list must be quoted: -- route53: > - command=create - zone=foo.com - record=localhost.foo.com - type=AAAA - ttl=7200 - value="::1" +- route53: + command: "create" + zone: "foo.com" + record: "localhost.foo.com" + type: "AAAA" + ttl: "7200" + value: "::1" # Add a TXT record. 
Note that TXT and SPF records must be surrounded # by quotes when sent to Route 53: -- route53: > - command=create - zone=foo.com - record=localhost.foo.com - type=TXT - ttl=7200 - value="\"bar\"" +- route53: + command: "create" + zone: "foo.com" + record: "localhost.foo.com" + type: "TXT" + ttl: "7200" + value: '"bar"' ''' diff --git a/cloud/amazon/s3.py b/cloud/amazon/s3.py index 6438c6405e7..1db21893a4c 100644 --- a/cloud/amazon/s3.py +++ b/cloud/amazon/s3.py @@ -103,28 +103,19 @@ author: Lester Wade, Ralph Tice EXAMPLES = ''' # Simple PUT operation - s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put + # Simple GET operation - s3: bucket=mybucket object=/my/desired/key.txt dest=/usr/local/myfile.txt mode=get -# GET/download and overwrite local file (trust remote) -- s3: bucket=mybucket object=/my/desired/key.txt dest=/usr/local/myfile.txt mode=get -# GET/download and do not overwrite local file (trust remote) -- s3: bucket=mybucket object=/my/desired/key.txt dest=/usr/local/myfile.txt mode=get force=false -# PUT/upload and overwrite remote file (trust local) -- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put + # PUT/upload with metadata -- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put metadata='Content-Encoding=gzip' -# PUT/upload with multiple metadata - s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put metadata='Content-Encoding=gzip,Cache-Control=no-cache' -# PUT/upload and do not overwrite remote file (trust local) -- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put force=false -# Download an object as a string to use else where in your playbook -- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=getstr + # Create an empty bucket - s3: bucket=mybucket mode=create -# Create a bucket with key as directory -- s3: bucket=mybucket object=/my/directory/path mode=create -# 
Create an empty bucket in the EU region -- s3: bucket=mybucket mode=create region=eu-west-1 + +# Create a bucket with key as directory, in the EU region +- s3: bucket=mybucket object=/my/directory/path mode=create region=eu-west-1 + # Delete a bucket and all contents - s3: bucket=mybucket mode=delete ''' From 9de0045d00973b0a277413df97f9fbe3034d8a25 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Mon, 1 Dec 2014 15:16:22 -0500 Subject: [PATCH 130/250] Remove potentially out of date or specific product references. --- cloud/amazon/ec2_facts.py | 2 -- cloud/amazon/ec2_tag.py | 5 ----- cloud/amazon/s3.py | 2 +- 3 files changed, 1 insertion(+), 8 deletions(-) diff --git a/cloud/amazon/ec2_facts.py b/cloud/amazon/ec2_facts.py index 7b5c610dc2d..9cae0989a95 100644 --- a/cloud/amazon/ec2_facts.py +++ b/cloud/amazon/ec2_facts.py @@ -34,8 +34,6 @@ description: - This module fetches data from the metadata servers in ec2 (aws) as per http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html. The module must be called from within the EC2 instance itself. - Eucalyptus cloud provides a similar service and this module should - work with this cloud provider as well. notes: - Parameters to filter on ec2_facts may be added later. author: "Silviu Dicu " diff --git a/cloud/amazon/ec2_tag.py b/cloud/amazon/ec2_tag.py index ab4b87bfa84..409041f906b 100644 --- a/cloud/amazon/ec2_tag.py +++ b/cloud/amazon/ec2_tag.py @@ -71,11 +71,6 @@ tasks: env: prod ''' -# Note: this module needs to be made idempotent. Possible solution is to use resource tags with the volumes. -# if state=present and it doesn't exist, create, tag and attach. -# Check for state by looking for volume attachment with tag (and against block device mapping?). -# Would personally like to revisit this in May when Eucalyptus also has tagging support (3.3). 
- import sys import time diff --git a/cloud/amazon/s3.py b/cloud/amazon/s3.py index 1db21893a4c..7b914dd9117 100644 --- a/cloud/amazon/s3.py +++ b/cloud/amazon/s3.py @@ -68,7 +68,7 @@ options: aliases: [] s3_url: description: - - "S3 URL endpoint. If not specified then the S3_URL environment variable is used, if that variable is defined. Ansible tries to guess if fakes3 (https://github.com/jubos/fake-s3) or Eucalyptus Walrus (https://github.com/eucalyptus/eucalyptus/wiki/Walrus) is used and configure connection accordingly. Current heuristic is: everything with scheme fakes3:// is fakes3, everything else not ending with amazonaws.com is Walrus." + - "S3 URL endpoint for usage with Eucalypus, fakes3, etc. Otherwise assumes AWS" default: null aliases: [ S3_URL ] aws_secret_key: From c03ec202a7519acc7a95118de3115b5894054518 Mon Sep 17 00:00:00 2001 From: Steve Fox Date: Mon, 1 Dec 2014 14:21:52 -0600 Subject: [PATCH 131/250] retry_interval needs to be a float before passing to time.sleep --- cloud/amazon/route53.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/route53.py b/cloud/amazon/route53.py index b3878e0580e..9c8f54f123f 100644 --- a/cloud/amazon/route53.py +++ b/cloud/amazon/route53.py @@ -160,7 +160,7 @@ def commit(changes, retry_interval): code = code.split("")[0] if code != 'PriorRequestNotComplete' or retry < 0: raise e - time.sleep(retry_interval) + time.sleep(float(retry_interval)) def main(): argument_spec = ec2_argument_spec() From b287e7c0a65e7468cd626d52b5c37f56e92f7777 Mon Sep 17 00:00:00 2001 From: willthames Date: Thu, 1 May 2014 13:08:38 +1000 Subject: [PATCH 132/250] Added the ability to remove snapshots Added state option, and accompanying snapshot_id option for when state=absent. 
--- cloud/amazon/ec2_snapshot.py | 41 ++++++++++++++++++++++++++++++++++-- 1 file changed, 39 insertions(+), 2 deletions(-) diff --git a/cloud/amazon/ec2_snapshot.py b/cloud/amazon/ec2_snapshot.py index eb77840e638..20cb017d81c 100644 --- a/cloud/amazon/ec2_snapshot.py +++ b/cloud/amazon/ec2_snapshot.py @@ -62,6 +62,19 @@ options: required: false default: 0 version_added: "1.5.1" + state: + description: + - whether to create or remove a snapshot + required: false + default: present + choices: ['absent', 'present'] + version_added: "1.9" + snapshot_id: + description: + - snapshot id to remove + required: false + version_added: "1.9" + author: Will Thames extends_documentation_fragment: aws ''' @@ -85,6 +98,12 @@ EXAMPLES = ''' snapshot_tags: frequency: hourly source: /data + +# Remove a snapshot +- local_action: + module: ec2_snapshot + snapshot_id: snap-abcd1234 + state: absent ''' import sys @@ -103,24 +122,28 @@ def main(): volume_id = dict(), description = dict(), instance_id = dict(), + snapshot_id = dict(), device_name = dict(), wait = dict(type='bool', default='true'), wait_timeout = dict(default=0), snapshot_tags = dict(type='dict', default=dict()), + state = dict(choices=['absent','present'], default='present'), ) ) module = AnsibleModule(argument_spec=argument_spec) volume_id = module.params.get('volume_id') + snapshot_id = module.params.get('snapshot_id') description = module.params.get('description') instance_id = module.params.get('instance_id') device_name = module.params.get('device_name') wait = module.params.get('wait') wait_timeout = module.params.get('wait_timeout') snapshot_tags = module.params.get('snapshot_tags') + state = module.params.get('state') - if not volume_id and not instance_id or volume_id and instance_id: - module.fail_json('One and only one of volume_id or instance_id must be specified') + if not volume_id and not instance_id and not snapshot_id or volume_id and instance_id and snapshot_id: + module.fail_json('One and only one of
volume_id or instance_id or snapshot_id must be specified') if instance_id and not device_name or device_name and not instance_id: module.fail_json('Instance ID and device name must both be specified') @@ -135,6 +158,20 @@ def main(): except boto.exception.BotoServerError, e: module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) + if state == 'absent': + if not snapshot_id: + module.fail_json(msg = 'snapshot_id must be set when state is absent') + try: + snapshots = ec2.get_all_snapshots([snapshot_id]) + ec2.delete_snapshot(snapshot_id) + module.exit_json(changed=True) + except boto.exception.BotoServerError, e: + # exception is raised if snapshot does not exist + if e.error_code == 'InvalidSnapshot.NotFound': + module.exit_json(changed=False) + else: + module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) + try: snapshot = ec2.create_snapshot(volume_id, description=description) time_waited = 0 From 18e641838ad96b5775c5b695b323f7f6dbafe7ca Mon Sep 17 00:00:00 2001 From: Tomasz Kontusz Date: Tue, 2 Dec 2014 22:33:49 +0100 Subject: [PATCH 133/250] supervisorctl: Don't try to start a starting program Starting a "STARTING" program throws ERROR (already started), so don't do that. 
--- web_infrastructure/supervisorctl.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/web_infrastructure/supervisorctl.py b/web_infrastructure/supervisorctl.py index 2d458169e76..f75992b9a6a 100644 --- a/web_infrastructure/supervisorctl.py +++ b/web_infrastructure/supervisorctl.py @@ -210,10 +210,10 @@ def main(): module.fail_json(msg=out, name=name, state=state) if state == 'started': - take_action_on_processes(processes, lambda s: s != 'RUNNING', 'start', 'started') + take_action_on_processes(processes, lambda s: s not in ('RUNNING', 'STARTING'), 'start', 'started') if state == 'stopped': - take_action_on_processes(processes, lambda s: s == 'RUNNING', 'stop', 'stopped') + take_action_on_processes(processes, lambda s: s in ('RUNNING', 'STARTING'), 'stop', 'stopped') # import module snippets from ansible.module_utils.basic import * From 799a75580ac19d3920749e0a07ef414d7e0542f9 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 2 Dec 2014 14:38:32 -0800 Subject: [PATCH 134/250] Update cron example for setting to run twice a day Fixes #415 --- system/cron.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/system/cron.py b/system/cron.py index d14f36253c0..c0a39b61c61 100644 --- a/system/cron.py +++ b/system/cron.py @@ -123,8 +123,8 @@ updates: [ 'Mike Grozak', 'Patrick Callahan' ] EXAMPLES = ''' # Ensure a job that runs at 2 and 5 exists. -# Creates an entry like "* 5,2 * * ls -alh > /dev/null" -- cron: name="check dirs" hour="5,2" job="ls -alh > /dev/null" +# Creates an entry like "0 5,2 * * ls -alh > /dev/null" +- cron: name="check dirs" minute="0" hour="5,2" job="ls -alh > /dev/null" # Ensure an old job is no longer present. 
Removes any job that is prefixed # by "#Ansible: an old job" from the crontab From f69a7553f760e8e9b45b62cb22d21e094f4c2f43 Mon Sep 17 00:00:00 2001 From: Francois Deppierraz Date: Wed, 3 Dec 2014 16:06:50 +0100 Subject: [PATCH 135/250] Fix auto_floating_ip documentation The default value is 'no' instead of the currently documented 'yes'. See cloud/openstack/nova_compute.py line 543: auto_floating_ip = dict(default=False, type='bool'), --- cloud/openstack/nova_compute.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/openstack/nova_compute.py b/cloud/openstack/nova_compute.py index 2b21ef86610..b51a1891a7d 100644 --- a/cloud/openstack/nova_compute.py +++ b/cloud/openstack/nova_compute.py @@ -121,7 +121,7 @@ options: description: - Should a floating ip be auto created and assigned required: false - default: 'yes' + default: 'no' version_added: "1.8" floating_ips: description: From dda6d89060f01a19efc46b8e4af53e455ad4731f Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 3 Dec 2014 07:06:20 -0800 Subject: [PATCH 136/250] Fix typo so docs will build --- database/postgresql/postgresql_db.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/database/postgresql/postgresql_db.py b/database/postgresql/postgresql_db.py index 60bbf247bf9..233ae3d1c87 100644 --- a/database/postgresql/postgresql_db.py +++ b/database/postgresql/postgresql_db.py @@ -45,7 +45,7 @@ options: required: false default: localhost login_unix_socket: - description + description: - Path to a Unix domain socket for local connections required: false default: null From 5af446382326aa93f89772316a84105b5110817f Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 3 Dec 2014 08:16:59 -0800 Subject: [PATCH 137/250] Fixes to doc formatting --- cloud/amazon/cloudformation.py | 1 + cloud/amazon/ec2.py | 3 ++- database/postgresql/postgresql_privs.py | 2 +- database/postgresql/postgresql_user.py | 2 +- system/service.py | 2 +- 5 files changed, 6 insertions(+), 4
deletions(-) diff --git a/cloud/amazon/cloudformation.py b/cloud/amazon/cloudformation.py index 4048a9922bf..aad5795c939 100644 --- a/cloud/amazon/cloudformation.py +++ b/cloud/amazon/cloudformation.py @@ -20,6 +20,7 @@ module: cloudformation short_description: create a AWS CloudFormation stack description: - Launches an AWS CloudFormation stack and waits for it complete. +version_added: "1.1" options: stack_name: description: diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index 87dc80b5e36..fc4ec64b8a4 100644 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -19,7 +19,8 @@ DOCUMENTATION = ''' module: ec2 short_description: create, terminate, start or stop an instance in ec2 description: - - Creates or terminates ec2 instances. + - Creates or terminates ec2 instances. +version_added: "0.9" options: key_name: description: diff --git a/database/postgresql/postgresql_privs.py b/database/postgresql/postgresql_privs.py index e618beefcc9..e78f2b14036 100644 --- a/database/postgresql/postgresql_privs.py +++ b/database/postgresql/postgresql_privs.py @@ -100,7 +100,7 @@ options: required: no default: 5432 unix_socket: - description + description: - Path to a Unix domain socket for local connections. - 'Alias: I(login_unix_socket)' required: false diff --git a/database/postgresql/postgresql_user.py b/database/postgresql/postgresql_user.py index 3581d2ea45c..a61cb544073 100644 --- a/database/postgresql/postgresql_user.py +++ b/database/postgresql/postgresql_user.py @@ -79,7 +79,7 @@ options: required: false default: localhost login_unix_socket: - description + description: - Path to a Unix domain socket for local connections required: false default: null diff --git a/system/service.py b/system/service.py index 2da5601aee9..b9627ca890f 100644 --- a/system/service.py +++ b/system/service.py @@ -25,7 +25,7 @@ author: Michael DeHaan version_added: "0.1" short_description: Manage services. description: - - Controls services on remote hosts. 
Supported init systems are: BSD init, + - Controls services on remote hosts. Supported init systems include BSD init, OpenRC, SysV, systemd, upstart. options: name: From 53afd359f62183f3016686cf61715dd8edf9a50a Mon Sep 17 00:00:00 2001 From: Scott Miller Date: Wed, 3 Dec 2014 11:45:42 -0500 Subject: [PATCH 138/250] fix documentation AWS does not recognize the subnet if it is presented in a comma delimited format with spaces. you must remove the space for Amazon to recognize the second subnet. --- cloud/amazon/ec2_elb_lb.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_elb_lb.py b/cloud/amazon/ec2_elb_lb.py index 6235770eb38..1ebccf73cdf 100644 --- a/cloud/amazon/ec2_elb_lb.py +++ b/cloud/amazon/ec2_elb_lb.py @@ -215,7 +215,7 @@ EXAMPLES = """ name: 'New ELB' security_group_ids: 'sg-123456, sg-67890' region: us-west-2 - subnets: 'subnet-123456, subnet-67890' + subnets: 'subnet-123456,subnet-67890' purge_subnets: yes listeners: - protocol: http From b766390ae2e0fc79a32bb3a55eed959655b76a43 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 3 Dec 2014 14:43:20 -0800 Subject: [PATCH 139/250] Add USAGE as a valid privilege --- database/postgresql/postgresql_privs.py | 2 +- database/postgresql/postgresql_user.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/database/postgresql/postgresql_privs.py b/database/postgresql/postgresql_privs.py index e78f2b14036..9b9d94923bc 100644 --- a/database/postgresql/postgresql_privs.py +++ b/database/postgresql/postgresql_privs.py @@ -238,7 +238,7 @@ except ImportError: VALID_PRIVS = frozenset(('SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE', 'REFERENCES', 'TRIGGER', 'CREATE', 'CONNECT', - 'TEMPORARY', 'TEMP', 'EXECUTE', 'USAGE', 'ALL')) + 'TEMPORARY', 'TEMP', 'EXECUTE', 'USAGE', 'ALL', 'USAGE')) class Error(Exception): pass diff --git a/database/postgresql/postgresql_user.py b/database/postgresql/postgresql_user.py index a61cb544073..020b3740a63 100644 --- 
a/database/postgresql/postgresql_user.py +++ b/database/postgresql/postgresql_user.py @@ -162,8 +162,8 @@ else: _flags = ('SUPERUSER', 'CREATEROLE', 'CREATEUSER', 'CREATEDB', 'INHERIT', 'LOGIN', 'REPLICATION') VALID_FLAGS = frozenset(itertools.chain(_flags, ('NO%s' % f for f in _flags))) -VALID_PRIVS = dict(table=frozenset(('SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE', 'REFERENCES', 'TRIGGER', 'ALL')), - database=frozenset(('CREATE', 'CONNECT', 'TEMPORARY', 'TEMP', 'ALL')), +VALID_PRIVS = dict(table=frozenset(('SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE', 'REFERENCES', 'TRIGGER', 'ALL', 'USAGE')), + database=frozenset(('CREATE', 'CONNECT', 'TEMPORARY', 'TEMP', 'ALL', 'USAGE')), ) class InvalidFlagsError(Exception): From 5c38ea8374a455a2104fdb1c58f330eb2a92ab7a Mon Sep 17 00:00:00 2001 From: follower Date: Fri, 5 Dec 2014 01:17:10 +1300 Subject: [PATCH 140/250] Note that `rsync` must be installed on both local & remote machine In particular, if `rsync` is not installed on the remote machine the following error message will be encountered: "rsync error: remote command not found" --- files/synchronize.py | 1 + 1 file changed, 1 insertion(+) diff --git a/files/synchronize.py b/files/synchronize.py index a225a2ce005..7f706384bc5 100644 --- a/files/synchronize.py +++ b/files/synchronize.py @@ -145,6 +145,7 @@ options: required: false version_added: "1.6" notes: + - `rsync` must be installed on both the local and remote machine. - Inspect the verbose output to validate the destination user/host/path are what was expected. 
- The remote user for the dest path will always be the remote_user, not From e8edee41669e1054db37659b78191fee01186e06 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 4 Dec 2014 13:35:07 -0800 Subject: [PATCH 141/250] Fix typo --- database/mysql/mysql_user.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/database/mysql/mysql_user.py b/database/mysql/mysql_user.py index 1fc57dc9534..eca83dc2f7c 100644 --- a/database/mysql/mysql_user.py +++ b/database/mysql/mysql_user.py @@ -490,7 +490,7 @@ def main(): except SQLParseError, e: module.fail_json(msg=str(e)) except InvalidPrivsError, e: - module.mail_json(msg=str(e)) + module.fail_json(msg=str(e)) else: if password is None: module.fail_json(msg="password parameter required when adding a user") From 449ced1efafed0037d0b848864a1a43c7cdfcfe2 Mon Sep 17 00:00:00 2001 From: Dale Bewley Date: Thu, 4 Dec 2014 17:31:35 -0800 Subject: [PATCH 142/250] use state parameter in examples --- packaging/os/redhat_subscription.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packaging/os/redhat_subscription.py b/packaging/os/redhat_subscription.py index df1c043b89c..b5205edc8b5 100644 --- a/packaging/os/redhat_subscription.py +++ b/packaging/os/redhat_subscription.py @@ -63,11 +63,11 @@ options: EXAMPLES = ''' # Register as user (joe_user) with password (somepass) and auto-subscribe to available content. 
-- redhat_subscription: action=register username=joe_user password=somepass autosubscribe=true +- redhat_subscription: state=present username=joe_user password=somepass autosubscribe=true # Register with activationkey (1-222333444) and consume subscriptions matching # the names (Red hat Enterprise Server) and (Red Hat Virtualization) -- redhat_subscription: action=register +- redhat_subscription: state=present activationkey=1-222333444 pool='^(Red Hat Enterprise Server|Red Hat Virtualization)$' ''' From 1710b45075d258216b32cf62a636e3a0f1ff098b Mon Sep 17 00:00:00 2001 From: Peter Bwire Date: Fri, 5 Dec 2014 18:08:47 +0300 Subject: [PATCH 143/250] Update django_manage to add database option for migrate Allow passing the database option to the django_manage module for migrations. This is useful in situations where multiple databases are used by a django application. --- web_infrastructure/django_manage.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web_infrastructure/django_manage.py b/web_infrastructure/django_manage.py index 580cc63c2dd..3e34a6388c0 100644 --- a/web_infrastructure/django_manage.py +++ b/web_infrastructure/django_manage.py @@ -170,7 +170,7 @@ def main(): syncdb=('database', ), test=('failfast', 'testrunner', 'liveserver', 'apps', ), validate=(), - migrate=('apps', 'skip', 'merge'), + migrate=('apps', 'skip', 'merge', 'database',), collectstatic=('link', ), ) From df6d58614ef04de7ad0e99173b218bc4d37aaed1 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 5 Dec 2014 12:45:01 -0500 Subject: [PATCH 144/250] now actually adds usable name for systemd when service only has init script --- system/service.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/system/service.py b/system/service.py index b9627ca890f..c2470523b96 100644 --- a/system/service.py +++ b/system/service.py @@ -420,6 +420,7 @@ class LinuxService(Service): if not systemd_enabled: return False + originalname = name # default to .service if the unit type is not
specified if name.find('.') > 0: unit_name, unit_type = name.rsplit('.', 1) @@ -446,6 +447,7 @@ class LinuxService(Service): # systemd also handles init scripts (and is enabled at this point) if initscript: + self.__systemd_unit = originalname return True return False From cf24e7d56c6981cf489f931e580f5d5673687401 Mon Sep 17 00:00:00 2001 From: Ben Whaley Date: Fri, 5 Dec 2014 18:04:16 -0800 Subject: [PATCH 145/250] vpc_zone_identifier should be a list as per the docs --- cloud/amazon/ec2_asg.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_asg.py b/cloud/amazon/ec2_asg.py index da922530770..ff3910ccd1e 100644 --- a/cloud/amazon/ec2_asg.py +++ b/cloud/amazon/ec2_asg.py @@ -276,6 +276,8 @@ def create_autoscaling_group(connection, module): ec2_connection = connect_to_aws(boto.ec2, region, **aws_connect_params) except boto.exception.NoAuthHandlerFound, e: module.fail_json(msg=str(e)) + elif vpc_zone_identifier: + vpc_zone_identifier = ','.join(vpc_zone_identifier) asg_tags = [] for tag in set_tags: @@ -555,7 +557,7 @@ def main(): min_size=dict(type='int'), max_size=dict(type='int'), desired_capacity=dict(type='int'), - vpc_zone_identifier=dict(type='str'), + vpc_zone_identifier=dict(type='list'), replace_batch_size=dict(type='int', default=1), replace_all_instances=dict(type='bool', default=False), replace_instances=dict(type='list', default=[]), From 2d5d7ff542591a55eba0270346087567c8bbeb7b Mon Sep 17 00:00:00 2001 From: Jesse Buchanan Date: Sun, 7 Dec 2014 13:35:24 -0500 Subject: [PATCH 146/250] Files module: Allow touch on hardlinks --- files/file.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/files/file.py b/files/file.py index 7aa5e45d7bc..d8c5b2762d8 100644 --- a/files/file.py +++ b/files/file.py @@ -332,13 +332,13 @@ def main(): open(path, 'w').close() except OSError, e: module.fail_json(path=path, msg='Error, could not touch target: %s' % str(e)) - elif prev_state in ['file', 'directory']: + elif 
prev_state in ['file', 'directory', 'hard']: try: os.utime(path, None) except OSError, e: module.fail_json(path=path, msg='Error while touching existing target: %s' % str(e)) else: - module.fail_json(msg='Cannot touch other than files and directories') + module.fail_json(msg='Cannot touch other than files, directories, and hardlinks (%s is %s)' % (path, prev_state)) try: module.set_fs_attributes_if_different(file_args, True) except SystemExit, e: From aba9e5e4f91c8232e5c92effa44a2436d3c72772 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 5 Dec 2014 12:45:01 -0500 Subject: [PATCH 147/250] vastly simplified systemd detection, made enable/disable errors clearer both of which allows us to remove 'service name detection' as if systemd is init we still use it to handle initscripts. --- system/service.py | 51 +++++++---------------------------------------- 1 file changed, 7 insertions(+), 44 deletions(-) diff --git a/system/service.py b/system/service.py index c2470523b96..275bac900a9 100644 --- a/system/service.py +++ b/system/service.py @@ -399,7 +399,7 @@ class LinuxService(Service): if os.path.isfile(initscript): self.svc_initscript = initscript - def check_systemd(name, initscript): + def check_systemd(): # verify systemd is installed (by finding systemctl) if not location.get('systemctl', False): return False @@ -414,51 +414,18 @@ class LinuxService(Service): for line in f: if 'systemd' in line: - systemd_enabled = True - break - - if not systemd_enabled: - return False - - originalname = name - # default to .service if the unit type is not specified - if name.find('.') > 0: - unit_name, unit_type = name.rsplit('.', 1) - if unit_type not in ("service", "socket", "device", "mount", "automount", - "swap", "target", "path", "timer", "snapshot"): - name = "%s.service" % name - else: - name = "%s.service" % name - - rc, out, err = self.execute_command("%s list-unit-files" % (location['systemctl'])) - - # adjust the service name to account for template service unit 
files - index = name.find('@') - if index != -1: - template_name = name[:index+1] - else: - template_name = name - - self.__systemd_unit = None - for line in out.splitlines(): - if line.startswith(template_name): - self.__systemd_unit = name return True - # systemd also handles init scripts (and is enabled at this point) - if initscript: - self.__systemd_unit = originalname - return True - return False # Locate a tool to enable/disable a service - if check_systemd(self.name, self.svc_initscript): + if location.get('systemctl',False) and check_systemd(): # service is managed by systemd - self.enable_cmd = location['systemctl'] + self.__systemd_unit = self.name self.svc_cmd = location['systemctl'] + self.enable_cmd = location['systemctl'] - elif location['initctl'] and os.path.exists("/etc/init/%s.conf" % self.name): + elif location.get('initctl', False) and os.path.exists("/etc/init/%s.conf" % self.name): # service is managed by upstart self.enable_cmd = location['initctl'] # set the upstart version based on the output of 'initctl version' @@ -628,10 +595,6 @@ class LinuxService(Service): self.changed = True action = None - # FIXME: we use chkconfig or systemctl - # to decide whether to run the command here but need something - # similar for upstart - # # Upstart's initctl # @@ -831,9 +794,9 @@ class LinuxService(Service): (rc, out, err) = self.execute_command("%s %s %s" % args) if rc != 0: if err: - self.module.fail_json(msg=err) + self.module.fail_json(msg="Error when trying to %s %s: rc=%s %s" % (action, self.name, rc, err)) else: - self.module.fail_json(msg=out) + self.module.fail_json(msg="Failure for %s %s: rc=%s %s" % (action, self.name, rc, out)) return (rc, out, err) From ba5c36ce5a2aed130391120684df195cf1a46039 Mon Sep 17 00:00:00 2001 From: Marcus Ahle Date: Mon, 8 Dec 2014 12:55:27 -0500 Subject: [PATCH 148/250] Preventing Nonetype is not iterable error when no tags are passed in for a newly created ASG --- cloud/amazon/ec2_asg.py | 2 +- 1 file changed, 
1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_asg.py b/cloud/amazon/ec2_asg.py index da922530770..668b83c3c68 100644 --- a/cloud/amazon/ec2_asg.py +++ b/cloud/amazon/ec2_asg.py @@ -248,7 +248,7 @@ def get_properties(autoscaling_group): properties['instance_facts'] = instance_facts properties['load_balancers'] = autoscaling_group.load_balancers - if hasattr(autoscaling_group, "tags"): + if getattr(autoscaling_group, "tags", None): properties['tags'] = dict((t.key, t.value) for t in autoscaling_group.tags) return properties From 586d012c4774e4019396c584c71d42ef6f8e785a Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 8 Dec 2014 16:48:53 -0800 Subject: [PATCH 149/250] Revert git module doc update because it appears ssh:// works and bare ssh repo does not This reverts commit e715909831e27cbfed2ae86e886bf154cefd7b6c. --- source_control/git.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source_control/git.py b/source_control/git.py index d7e12508803..06768744b13 100644 --- a/source_control/git.py +++ b/source_control/git.py @@ -155,7 +155,7 @@ EXAMPLES = ''' version=release-0.22 # Example read-write git checkout from github -- git: repo=git@github.com/mylogin/hello.git dest=/home/mylogin/hello +- git: repo=ssh://git@github.com/mylogin/hello.git dest=/home/mylogin/hello # Example just ensuring the repo checkout exists - git: repo=git://foosball.example.org/path/to/repo.git dest=/srv/checkout update=no From b326201a65b10c2e38a5e46306423b1286619d23 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 8 Dec 2014 22:08:25 -0800 Subject: [PATCH 150/250] Some cleanups to the git module --- source_control/git.py | 41 ++++++++++++++++------------------------- 1 file changed, 16 insertions(+), 25 deletions(-) diff --git a/source_control/git.py b/source_control/git.py index 06768744b13..766ba99bf28 100644 --- a/source_control/git.py +++ b/source_control/git.py @@ -272,7 +272,7 @@ def get_submodule_versions(git_path, module, dest, 
version='HEAD'): return submodules def clone(git_path, module, repo, dest, remote, depth, version, bare, - reference, recursive): + reference): ''' makes a new git repo if it does not already exist ''' dest_dirname = os.path.dirname(dest) try: @@ -284,8 +284,6 @@ def clone(git_path, module, repo, dest, remote, depth, version, bare, cmd.append('--bare') else: cmd.extend([ '--origin', remote ]) - if recursive: - cmd.extend([ '--recursive' ]) if is_remote_branch(git_path, module, dest, repo, version) \ or is_remote_tag(git_path, module, dest, repo, version): cmd.extend([ '--branch', version ]) @@ -298,7 +296,7 @@ def clone(git_path, module, repo, dest, remote, depth, version, bare, if bare: if remote != 'origin': module.run_command([git_path, 'remote', 'add', remote, repo], check_rc=True, cwd=dest) - + def has_local_mods(module, git_path, dest, bare): if bare: return False @@ -434,7 +432,7 @@ def get_head_branch(git_path, module, dest, remote, bare=False): f.close() return branch -def fetch(git_path, module, repo, dest, version, remote, bare, track_submodules, recursive): +def fetch(git_path, module, repo, dest, version, remote, bare): ''' updates repo from remote sources ''' out_acc = [] err_acc = [] @@ -459,12 +457,6 @@ def fetch(git_path, module, repo, dest, version, remote, bare, track_submodules, out_acc.append(out2) err_acc.append(err2) - if recursive: - (rc, out3, err3) = submodule_update(git_path, module, dest, track_submodules) - if rc != 0: - module.fail_json(msg="Failed to update submodules: %s" % "".join(out3, err3)) - out_acc.append(out3) - err_acc.append(err3) return (rc, ''.join(out_acc), ''.join(err_acc)) @@ -484,6 +476,7 @@ def submodules_fetch(git_path, module, remote, track_submodules, dest): if not os.path.exists(os.path.join(dest, path, '.git')): changed = True break + ### FIXME: Add the submodule hostkeys here as well # Check for updates to existing modules if not changed: @@ -535,7 +528,7 @@ def submodule_update(git_path, module, dest, 
track_submodules): return (rc, out, err) -def switch_version(git_path, module, dest, remote, version, recursive, track_submodules): +def switch_version(git_path, module, dest, remote, version): cmd = '' if version != 'HEAD': if is_remote_branch(git_path, module, dest, remote, version): @@ -560,10 +553,6 @@ def switch_version(git_path, module, dest, remote, version, recursive, track_sub module.fail_json(msg="Failed to checkout %s" % (version)) else: module.fail_json(msg="Failed to checkout branch %s" % (branch)) - if recursive: - (rc, out2, err2) = submodule_update(git_path, module, dest, track_submodules) - out1 += out2 - err1 += err1 return (rc, out1, err1) # =========================================== @@ -626,7 +615,7 @@ def main(): if module.params['ssh_opts'] is not None: if not "-o StrictHostKeyChecking=no" in module.params['ssh_opts']: add_git_host_key(module, repo, accept_hostkey=module.params['accept_hostkey']) - else: + else: add_git_host_key(module, repo, accept_hostkey=module.params['accept_hostkey']) recursive = module.params['recursive'] @@ -645,7 +634,7 @@ def main(): remote_head = get_remote_head(git_path, module, dest, version, repo, bare) module.exit_json(changed=True, before=before, after=remote_head) # there's no git config, so clone - clone(git_path, module, repo, dest, remote, depth, version, bare, reference, recursive) + clone(git_path, module, repo, dest, remote, depth, version, bare, reference) repo_updated = True elif not update: # Just return having found a repo already in the dest path @@ -676,16 +665,16 @@ def main(): repo_updated = False else: repo_updated = False - if repo_updated is not False: + if repo_updated is None: if module.check_mode: module.exit_json(changed=True, before=before, after=remote_head) - fetch(git_path, module, repo, dest, version, remote, bare, track_submodules, recursive) + fetch(git_path, module, repo, dest, version, remote, bare) repo_updated = True # switch to version specified regardless of whether # we 
cloned or pulled if repo_updated and not bare: - switch_version(git_path, module, dest, remote, version, recursive, track_submodules) + switch_version(git_path, module, dest, remote, version) # Deal with submodules submodules_updated = False @@ -706,14 +695,16 @@ def main(): after = get_version(module, git_path, dest) changed = False - if before != after or local_mods: + if before != after or local_mods or submodules_updated: changed = True - elif submodules_updated: - changed =True # cleanup the wrapper script if ssh_wrapper: - os.remove(ssh_wrapper) + try: + os.remove(ssh_wrapper) + except OSError: + # No need to fail if the file already doesn't exist + pass module.exit_json(changed=changed, before=before, after=after) From 050a462b2a835aa87a973ae1e229ba227fc24e54 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 8 Dec 2014 22:08:50 -0800 Subject: [PATCH 151/250] Fix git mod so that we switch to the desired version even if it was previously downloaded --- source_control/git.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source_control/git.py b/source_control/git.py index 766ba99bf28..7ac24804e77 100644 --- a/source_control/git.py +++ b/source_control/git.py @@ -672,8 +672,8 @@ def main(): repo_updated = True # switch to version specified regardless of whether - # we cloned or pulled - if repo_updated and not bare: + # we got new revisions from the repository + if not bare: switch_version(git_path, module, dest, remote, version) # Deal with submodules From 55a51f4ca9c24d1f45ab5ac1d54e68cbbd7d2ab7 Mon Sep 17 00:00:00 2001 From: Mike Putnam Date: Tue, 9 Dec 2014 10:09:05 -0600 Subject: [PATCH 152/250] Clarify possible variable value in the docs. 
--- cloud/amazon/cloudformation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/cloudformation.py b/cloud/amazon/cloudformation.py index aad5795c939..72b970cd262 100644 --- a/cloud/amazon/cloudformation.py +++ b/cloud/amazon/cloudformation.py @@ -50,7 +50,7 @@ options: state: description: - If state is "present", stack will be created. If state is "present" and if stack exists and template has changed, it will be updated. - If state is absent, stack will be removed. + If state is "absent", stack will be removed. required: true default: null aliases: [] From 2493dda4e519b3845f5a4926a4368d65829413c2 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 9 Dec 2014 08:20:57 -0800 Subject: [PATCH 153/250] Before pulling submodules from repos add ssh hostkeys for those submodules Fixes #9655 --- source_control/git.py | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/source_control/git.py b/source_control/git.py index 7ac24804e77..44d4c143450 100644 --- a/source_control/git.py +++ b/source_control/git.py @@ -467,16 +467,23 @@ def submodules_fetch(git_path, module, remote, track_submodules, dest): # no submodules return changed - # Check for new submodules gitmodules_file = open(os.path.join(dest, '.gitmodules'), 'r') for line in gitmodules_file: - if line.strip().startswith('path'): + # Check for new submodules + if not changed and line.strip().startswith('path'): path = line.split('=', 1)[1].strip() # Check that dest/path/.git exists if not os.path.exists(os.path.join(dest, path, '.git')): changed = True - break - ### FIXME: Add the submodule hostkeys here as well + + # add the submodule repo's hostkey + if line.strip().startswith('url'): + repo = line.split('=', 1)[1].strip() + if module.params['ssh_opts'] is not None: + if not "-o StrictHostKeyChecking=no" in module.params['ssh_opts']: + add_git_host_key(module, repo, accept_hostkey=module.params['accept_hostkey']) + else: + add_git_host_key(module, 
repo, accept_hostkey=module.params['accept_hostkey']) # Check for updates to existing modules if not changed: From 375025d2e3edf2dca764a50c1c213286f38fc9c2 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 9 Dec 2014 09:02:05 -0800 Subject: [PATCH 154/250] Change git update param documentation to match reality --- source_control/git.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source_control/git.py b/source_control/git.py index 44d4c143450..6b85783db20 100644 --- a/source_control/git.py +++ b/source_control/git.py @@ -103,7 +103,7 @@ options: choices: [ "yes", "no" ] version_added: "1.2" description: - - If C(no), just returns information about the repository without updating. + - If C(no), do not retrieve new revisions from the origin repository executable: required: false default: null From 8396c063a37737f4cca55eab40b8456e18435a45 Mon Sep 17 00:00:00 2001 From: Andrew Shults Date: Tue, 9 Dec 2014 17:14:16 -0500 Subject: [PATCH 155/250] Strip white space to support multiline permissions in YAML --- database/mysql/mysql_user.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/database/mysql/mysql_user.py b/database/mysql/mysql_user.py index eca83dc2f7c..0ba049fc505 100644 --- a/database/mysql/mysql_user.py +++ b/database/mysql/mysql_user.py @@ -277,8 +277,8 @@ def privileges_unpack(priv): not specified in the string, as MySQL will always provide this by default. """ output = {} - for item in priv.split('/'): - pieces = item.split(':') + for item in priv.strip().split('/'): + pieces = item.strip().split(':') if '.' 
in pieces[0]: pieces[0] = pieces[0].split('.') for idx, piece in enumerate(pieces): From 6ce7e63c5f49f30cb50b65b9f7abd60734696bd2 Mon Sep 17 00:00:00 2001 From: PikachuEXE Date: Wed, 10 Dec 2014 10:23:59 +0800 Subject: [PATCH 156/250] ~ Add missing documentation for option `email` --- cloud/docker/docker.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 5763e346779..a65b65bf902 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -126,6 +126,12 @@ options: required: false default: null aliases: [] + email: + description: + - Set remote API email + required: false + default: null + aliases: [] hostname: description: - Set container hostname From bd7c6dbd3a52404f92d97be7837307c77ef71cc3 Mon Sep 17 00:00:00 2001 From: Petros Moisiadis Date: Wed, 10 Dec 2014 18:53:55 +0200 Subject: [PATCH 157/250] mysql_user: Added missing privileges Added missing privileges 'CREATE TABLESPACE' and 'PROXY' (see: http://dev.mysql.com/doc/refman/5.5/en/privileges-provided.html). 
--- database/mysql/mysql_user.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/database/mysql/mysql_user.py b/database/mysql/mysql_user.py index 0ba049fc505..e160fcb68f6 100644 --- a/database/mysql/mysql_user.py +++ b/database/mysql/mysql_user.py @@ -156,10 +156,10 @@ VALID_PRIVS = frozenset(('CREATE', 'DROP', 'GRANT', 'GRANT OPTION', 'DELETE', 'INDEX', 'INSERT', 'SELECT', 'UPDATE', 'CREATE TEMPORARY TABLES', 'TRIGGER', 'CREATE VIEW', 'SHOW VIEW', 'ALTER ROUTINE', 'CREATE ROUTINE', - 'EXECUTE', 'FILE', 'CREATE USER', 'PROCESS', - 'RELOAD', 'REPLICATION CLIENT', 'REPLICATION SLAVE', - 'SHOW DATABASES', 'SHUTDOWN', 'SUPER', 'ALL', - 'ALL PRIVILEGES', 'USAGE',)) + 'EXECUTE', 'FILE', 'CREATE TABLESPACE', 'CREATE USER', + 'PROCESS', 'PROXY', 'RELOAD', 'REPLICATION CLIENT', + 'REPLICATION SLAVE', 'SHOW DATABASES', 'SHUTDOWN', + 'SUPER', 'ALL', 'ALL PRIVILEGES', 'USAGE',)) class InvalidPrivsError(Exception): pass From 467ad65f735ddb33b6302cf0968074c22d153565 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 10 Dec 2014 11:17:16 -0800 Subject: [PATCH 158/250] Fix documentation formatting --- files/synchronize.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/files/synchronize.py b/files/synchronize.py index 7f706384bc5..a2138b3410d 100644 --- a/files/synchronize.py +++ b/files/synchronize.py @@ -145,16 +145,16 @@ options: required: false version_added: "1.6" notes: - - `rsync` must be installed on both the local and remote machine. + - rsync must be installed on both the local and remote machine. - Inspect the verbose output to validate the destination user/host/path are what was expected. - The remote user for the dest path will always be the remote_user, not - the sudo_user. + the sudo_user. - Expect that dest=~/x will be ~/x even if using sudo. - To exclude files and directories from being synchronized, you may add C(.rsync-filter) files to the source directory. 
- - + + author: Timothy Appnel ''' From ba139b145b8f1665b9dc184fc7c72087a4a12e8e Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 11 Dec 2014 09:05:27 -0500 Subject: [PATCH 159/250] fixed hostname for gentoo --- system/hostname.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/hostname.py b/system/hostname.py index 95b28dbf69d..48311f07a96 100644 --- a/system/hostname.py +++ b/system/hostname.py @@ -313,7 +313,7 @@ class OpenRCStrategy(GenericStrategy): def set_permanent_hostname(self, name): try: f = open(self.HOSTNAME_FILE, 'r') - lines = (x.strip() for x in f) + lines = [x.strip() for x in f] for i, line in enumerate(lines): if line.startswith('hostname='): From 5bc8d5b96eaa9fa30021b6edc323b9bd644c69d2 Mon Sep 17 00:00:00 2001 From: Harald Skoglund Date: Thu, 11 Dec 2014 16:41:25 +0100 Subject: [PATCH 160/250] add support for stack policies in cloudformation --- cloud/amazon/cloudformation.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/cloud/amazon/cloudformation.py b/cloud/amazon/cloudformation.py index 72b970cd262..631189bc43c 100644 --- a/cloud/amazon/cloudformation.py +++ b/cloud/amazon/cloudformation.py @@ -60,6 +60,13 @@ options: required: true default: null aliases: [] + stack_policy: + description: + - the path of the cloudformation stack policy + required: false + default: null + aliases: [] + version_added: "x.x" tags: description: - Dictionary of tags to associate with stack and it's resources during stack creation. Cannot be updated later. 
@@ -196,6 +203,7 @@ def main(): template_parameters=dict(required=False, type='dict', default={}), state=dict(default='present', choices=['present', 'absent']), template=dict(default=None, required=True), + stack_policy=dict(default=None, required=False), disable_rollback=dict(default=False, type='bool'), tags=dict(default=None) ) @@ -208,6 +216,10 @@ def main(): state = module.params['state'] stack_name = module.params['stack_name'] template_body = open(module.params['template'], 'r').read() + if module.params['stack_policy'] is not None: + stack_policy_body = open(module.params['stack_policy'], 'r').read() + else: + stack_policy_body = None disable_rollback = module.params['disable_rollback'] template_parameters = module.params['template_parameters'] tags = module.params['tags'] @@ -244,6 +256,7 @@ def main(): try: cfn.create_stack(stack_name, parameters=template_parameters_tup, template_body=template_body, + stack_policy_body=stack_policy_body, disable_rollback=disable_rollback, capabilities=['CAPABILITY_IAM'], **kwargs) @@ -264,6 +277,7 @@ def main(): try: cfn.update_stack(stack_name, parameters=template_parameters_tup, template_body=template_body, + stack_policy_body=stack_policy_body, disable_rollback=disable_rollback, capabilities=['CAPABILITY_IAM']) operation = 'UPDATE' From b354423bae06d277e89d2d4ccde1dd2bd610dac6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Harri=20H=C3=A4m=C3=A4l=C3=A4inen?= Date: Thu, 11 Dec 2014 18:59:42 +0200 Subject: [PATCH 161/250] Fixed typo in comment --- cloud/openstack/glance_image.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/openstack/glance_image.py b/cloud/openstack/glance_image.py index 3bbc6f0ebca..6425fa2ca5d 100644 --- a/cloud/openstack/glance_image.py +++ b/cloud/openstack/glance_image.py @@ -254,7 +254,7 @@ def main(): else: _glance_delete_image(module, module.params, client) -# this is magic, see lib/ansible/module.params['common.py +# this is magic, see lib/ansible/module_common.py from 
ansible.module_utils.basic import * from ansible.module_utils.openstack import * main() From 9eb2bff946c8e1147a70b7a1ff51f7dd11ee42ce Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Wed, 10 Dec 2014 14:37:38 -0600 Subject: [PATCH 162/250] Make a few enhancements to the rax_scaling_group module * Cast loadbalancer id and port to integers * Do not attempt to base64 encode NoneType values --- cloud/rackspace/rax_scaling_group.py | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/cloud/rackspace/rax_scaling_group.py b/cloud/rackspace/rax_scaling_group.py index dcd8dbbfeaa..64783397016 100644 --- a/cloud/rackspace/rax_scaling_group.py +++ b/cloud/rackspace/rax_scaling_group.py @@ -209,8 +209,16 @@ def rax_asg(module, cooldown=300, disk_config=None, files={}, flavor=None, lbs = [] if loadbalancers: for lb in loadbalancers: - lb_id = lb.get('id') - port = lb.get('port') + try: + lb_id = int(lb.get('id')) + except (ValueError, TypeError): + module.fail_json(msg='Load balancer ID is not an integer: ' + '%s' % lb.get('id')) + try: + port = int(lb.get('port')) + except (ValueError, TypeError): + module.fail_json(msg='Load balancer port is not an ' + 'integer: %s' % lb.get('port')) if not lb_id or not port: continue lbs.append((lb_id, port)) @@ -294,7 +302,8 @@ def rax_asg(module, cooldown=300, disk_config=None, files={}, flavor=None, if config_drive != lc.get('config_drive'): lc_args['config_drive'] = config_drive - if base64.b64encode(user_data) != lc.get('user_data'): + if (user_data and + base64.b64encode(user_data) != lc.get('user_data')): lc_args['user_data'] = user_data if lc_args: From 2e60425b714803fae5e900c335a00b3f45421ff5 Mon Sep 17 00:00:00 2001 From: Jon Hawkesworth Date: Mon, 24 Nov 2014 17:52:05 +0000 Subject: [PATCH 163/250] Adding first pass at win_copy, win_file and win_template modules. 
--- windows/win_stat.ps1 | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/windows/win_stat.ps1 b/windows/win_stat.ps1 index 4e4c55b2aa3..928b779b488 100644 --- a/windows/win_stat.ps1 +++ b/windows/win_stat.ps1 @@ -53,10 +53,7 @@ Else If ($get_md5 -and $result.stat.exists -and -not $result.stat.isdir) { - $sp = new-object -TypeName System.Security.Cryptography.MD5CryptoServiceProvider; - $fp = [System.IO.File]::Open($path, [System.IO.Filemode]::Open, [System.IO.FileAccess]::Read); - $hash = [System.BitConverter]::ToString($sp.ComputeHash($fp)).Replace("-", "").ToLower(); - $fp.Dispose(); + $hash = Get-FileMd5($path); Set-Attr $result.stat "md5" $hash; } From 732491eb93631d777aaece959123d33711fcf85a Mon Sep 17 00:00:00 2001 From: Jon Hawkesworth Date: Mon, 24 Nov 2014 17:54:09 +0000 Subject: [PATCH 164/250] Adding first pass at win_copy, win_file and win_template modules (include new files). --- windows/win_copy.ps1 | 84 ++++++++++++++++++++++++++++++++ windows/win_copy.py | 60 +++++++++++++++++++++++ windows/win_file.ps1 | 105 ++++++++++++++++++++++++++++++++++++++++ windows/win_file.py | 73 ++++++++++++++++++++++++++++ windows/win_template.py | 52 ++++++++++++++++++++ 5 files changed, 374 insertions(+) create mode 100644 windows/win_copy.ps1 create mode 100644 windows/win_copy.py create mode 100644 windows/win_file.ps1 create mode 100644 windows/win_file.py create mode 100644 windows/win_template.py diff --git a/windows/win_copy.ps1 b/windows/win_copy.ps1 new file mode 100644 index 00000000000..544187a8b5a --- /dev/null +++ b/windows/win_copy.ps1 @@ -0,0 +1,84 @@ +#!powershell +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +# WANT_JSON +# POWERSHELL_COMMON + +$params = Parse-Args $args; + +$src= Get-Attr $params "src" $FALSE; +If ($src -eq $FALSE) +{ + Fail-Json (New-Object psobject) "missing required argument: src"; +} + +$dest= Get-Attr $params "dest" $FALSE; +If ($dest -eq $FALSE) +{ + Fail-Json (New-Object psobject) "missing required argument: dest"; +} + +# seems to be supplied by the calling environment, but +# probably shouldn't be a test for it existing in the params. +# TODO investigate. +$original_basename = Get-Attr $params "original_basename" $FALSE; +If ($original_basename -eq $FALSE) +{ + Fail-Json (New-Object psobject) "missing required argument: original_basename "; +} + +$result = New-Object psobject @{ + changed = $FALSE +}; + +# if $dest is a dir, append $original_basename so the file gets copied with its intended name. +if (Test-Path $dest -PathType Container) +{ + $dest = Join-Path $dest $original_basename; +} + +If (Test-Path $dest) +{ + $dest_md5 = Get-FileMd5 ($dest); + $src_md5 = Get-FileMd5 ($src); + + If (! 
$src_md5.CompareTo($dest_md5)) + { + # New-Item -Force creates subdirs for recursive copies + New-Item -Force $dest -Type file; + Copy-Item -Path $src -Destination $dest -Force; + } + $dest_md5 = Get-FileMd5 ($dest); + If ( $src_md5.CompareTo($dest_md5)) + { + $result.changed = $TRUE; + } + Else + { + Fail-Json (New-Object psobject) "Failed to place file"; + } +} +Else +{ + New-Item -Force $dest -Type file; + Copy-Item -Path $src -Destination $dest; + $result.changed = $TRUE; +} + +$dest_checksum = Get-FileMd5($dest); +$result.checksum = $dest_checksum; + +Exit-Json $result; diff --git a/windows/win_copy.py b/windows/win_copy.py new file mode 100644 index 00000000000..7d0b49e5985 --- /dev/null +++ b/windows/win_copy.py @@ -0,0 +1,60 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2012, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +import os +import time + +DOCUMENTATION = ''' +--- +module: win_copy +version_added: "1.8" +short_description: Copies files to remote locations on windows hosts. +description: + - The M(win_copy) module copies a file on the local box to remote windows locations. +options: + src: + description: + - Local path to a file to copy to the remote server; can be absolute or relative. + If path is a directory, it is copied recursively. In this case, if path ends + with "/", only inside contents of that directory are copied to destination. 
+ Otherwise, if it does not end with "/", the directory itself with all contents + is copied. This behavior is similar to Rsync. + required: false + default: null + aliases: [] + dest: + description: + - Remote absolute path where the file should be copied to. If src is a directory, + this must be a directory too. Use \\ for path separators. + required: true + default: null +author: Michael DeHaan +notes: + - The "win_copy" module recursively copy facility does not scale to lots (>hundreds) of files. + Instead, you may find it better to create files locally, perhaps using win_template, and + then use win_get_url to put them in the correct location. +''' + +EXAMPLES = ''' +# Example from Ansible Playbooks +- win_copy: src=/srv/myfiles/foo.conf dest=c:\\TEMP\\foo.conf + +''' + diff --git a/windows/win_file.ps1 b/windows/win_file.ps1 new file mode 100644 index 00000000000..62ac81fc1ee --- /dev/null +++ b/windows/win_file.ps1 @@ -0,0 +1,105 @@ +#!powershell +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +# WANT_JSON +# POWERSHELL_COMMON + +$params = Parse-Args $args; + +# path +$path = Get-Attr $params "path" $FALSE; +If ($path -eq $FALSE) +{ + $path = Get-Attr $params "dest" $FALSE; + If ($path -eq $FALSE) + { + $path = Get-Attr $params "name" $FALSE; + If ($path -eq $FALSE) + { + Fail-Json (New-Object psobject) "missing required argument: path"; + } + } +} + +# JH Following advice from Chris Church, only allow the following states +# in the windows version for now: +# state - file, directory, touch, absent +# (originally was: state - file, link, directory, hard, touch, absent) + +$state = Get-Attr $params "state" "file"; + +#$recurse = Get-Attr $params "recurse" "no"; + +# force - yes, no +# $force = Get-Attr $params "force" "no"; + +# result +$result = New-Object psobject @{ + changed = $FALSE +}; + +If ( $state -eq "touch" ) +{ + If(Test-Path $path) + { + (Get-ChildItem $path).LastWriteTime = Get-Date + } + Else + { + echo $null > $file + } + $result.changed = $TRUE; +} + +If (Test-Path $path) +{ + $fileinfo = Get-Item $path; + If ( $state -eq "absent" ) + { + Remove-Item -Recurse -Force $fileinfo; + $result.changed = $TRUE; + } + Else + { + # Only files have the .Directory attribute. + If ( $state -eq "directory" -and $fileinfo.Directory ) + { + Fail-Json (New-Object psobject) "path is not a directory"; + } + + # Only files have the .Directory attribute. 
+ If ( $state -eq "file" -and -not $fileinfo.Directory ) + { + Fail-Json (New-Object psobject) "path is not a file"; + } + + } +} +Else +{ + If ( $state -eq "directory" ) + { + New-Item -ItemType directory -Path $path + $result.changed = $TRUE; + } + + If ( $state -eq "file" ) + { + Fail-Json (New-Object psobject) "path will not be created"; + } +} + +Exit-Json $result; diff --git a/windows/win_file.py b/windows/win_file.py new file mode 100644 index 00000000000..6a218216617 --- /dev/null +++ b/windows/win_file.py @@ -0,0 +1,73 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2012, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + + +DOCUMENTATION = ''' +--- +module: win_file +version_added: "1.8" +short_description: Creates, touches or removes files or directories. +extends_documentation_fragment: files +description: + - Creates (empty) files, updates file modification stamps of existing files, + and can create or remove directories. + Unlike M(file), does not modify ownership, permissions or manipulate links. +notes: + - See also M(win_copy), M(win_template), M(copy), M(template), M(assemble) +requirements: [ ] +author: Michael DeHaan +options: + path: + description: + - 'path to the file being managed. 
Aliases: I(dest), I(name)' + required: true + default: [] + aliases: ['dest', 'name'] + state: + description: + - If C(directory), all immediate subdirectories will be created if they + do not exist. + If C(file), the file will NOT be created if it does not exist, see the M(copy) + or M(template) module if you want that behavior. If C(absent), + directories will be recursively deleted, and files will be removed. + If C(touch), an empty file will be created if the c(path) does not + exist, while an existing file or directory will receive updated file access and + modification times (similar to the way `touch` works from the command line). + required: false + default: file + choices: [ file, directory, touch, absent ] +''' + +EXAMPLES = ''' +# create a file +- win_file: path=C:\\temp\\foo.conf + +# touch a file (creates if not present, updates modification time if present) +- win_file: path=C:\\temp\\foo.conf state=touch + +# remove a file, if present +- win_file: path=C:\\temp\\foo.conf state=absent + +# create directory structure +- win_file: path=C:\\temp\\folder\\subfolder state=directory + +# remove directory structure +- win_file: path=C:\\temp state=absent +''' diff --git a/windows/win_template.py b/windows/win_template.py new file mode 100644 index 00000000000..402702f93b2 --- /dev/null +++ b/windows/win_template.py @@ -0,0 +1,52 @@ +# this is a virtual module that is entirely implemented server side + +DOCUMENTATION = ''' +--- +module: win_template +version_added: 1.8 +short_description: Templates a file out to a remote server. +description: + - Templates are processed by the Jinja2 templating language + (U(http://jinja.pocoo.org/docs/)) - documentation on the template + formatting can be found in the Template Designer Documentation + (U(http://jinja.pocoo.org/docs/templates/)). 
+ - "Six additional variables can be used in templates: C(ansible_managed) + (configurable via the C(defaults) section of C(ansible.cfg)) contains a string + which can be used to describe the template name, host, modification time of the + template file and the owner uid, C(template_host) contains the node name of + the template's machine, C(template_uid) the owner, C(template_path) the + absolute path of the template, C(template_fullpath) is the absolute path of the + template, and C(template_run_date) is the date that the template was rendered. Note that including + a string that uses a date in the template will result in the template being marked 'changed' + each time." +options: + src: + description: + - Path of a Jinja2 formatted template on the local server. This can be a relative or absolute path. + required: true + default: null + aliases: [] + dest: + description: + - Location to render the template to on the remote machine. + required: true + default: null + backup: + description: + - Create a backup file including the timestamp information so you can get + the original file back if you somehow clobbered it incorrectly. + required: false + choices: [ "yes", "no" ] + default: "no" +notes: + - "templates are loaded with C(trim_blocks=True)." 
+requirements: [] +author: Michael DeHaan +''' + +EXAMPLES = ''' +# Example +- win_template: src=/mytemplates/foo.j2 dest=C:\\temp\\file.conf + + +''' From 08c5cc06c6ad9a1e0016ad89eb0f7ca009cc8108 Mon Sep 17 00:00:00 2001 From: Jon Hawkesworth Date: Thu, 11 Dec 2014 21:47:52 +0000 Subject: [PATCH 165/250] Switched to SHA1 checksum instead of MD5 --- windows/win_copy.ps1 | 12 ++++++------ windows/win_stat.ps1 | 3 ++- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/windows/win_copy.ps1 b/windows/win_copy.ps1 index 544187a8b5a..9ffdab85f03 100644 --- a/windows/win_copy.ps1 +++ b/windows/win_copy.ps1 @@ -52,17 +52,17 @@ if (Test-Path $dest -PathType Container) If (Test-Path $dest) { - $dest_md5 = Get-FileMd5 ($dest); - $src_md5 = Get-FileMd5 ($src); + $dest_checksum = Get-FileChecksum ($dest); + $src_checksum = Get-FileChecksum ($src); - If (! $src_md5.CompareTo($dest_md5)) + If (! $src_checksum.CompareTo($dest_checksum)) { # New-Item -Force creates subdirs for recursive copies New-Item -Force $dest -Type file; Copy-Item -Path $src -Destination $dest -Force; } - $dest_md5 = Get-FileMd5 ($dest); - If ( $src_md5.CompareTo($dest_md5)) + $dest_checksum = Get-FileChecksum ($dest); + If ( $src_checksum.CompareTo($dest_checksum)) { $result.changed = $TRUE; } @@ -78,7 +78,7 @@ Else $result.changed = $TRUE; } -$dest_checksum = Get-FileMd5($dest); +$dest_checksum = Get-FileChecksum($dest); $result.checksum = $dest_checksum; Exit-Json $result; diff --git a/windows/win_stat.ps1 b/windows/win_stat.ps1 index 928b779b488..10101a62b30 100644 --- a/windows/win_stat.ps1 +++ b/windows/win_stat.ps1 @@ -53,8 +53,9 @@ Else If ($get_md5 -and $result.stat.exists -and -not $result.stat.isdir) { - $hash = Get-FileMd5($path); + $hash = Get-FileChecksum($path); Set-Attr $result.stat "md5" $hash; + Set-Attr $result.stat "checksum" $hash; } Exit-Json $result; From f4625a3dd104e245a80ff547deb75f0de880d24f Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 12 Dec 2014 11:22:20 
-0800 Subject: [PATCH 166/250] Fixup the directory name at a higher level so it can be used by both conditional branches Fixes #500 --- cloud/google/gc_storage.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/cloud/google/gc_storage.py b/cloud/google/gc_storage.py index 1963a148da2..28beea05783 100644 --- a/cloud/google/gc_storage.py +++ b/cloud/google/gc_storage.py @@ -319,11 +319,12 @@ def handle_create(module, gs, bucket, obj): else: module.exit_json(msg="Bucket created successfully", changed=create_bucket(module, gs, bucket)) if bucket and obj: + if obj.endswith('/'): + dirobj = obj + else: + dirobj = obj + "/" + if bucket_check(module, gs, bucket): - if obj.endswith('/'): - dirobj = obj - else: - dirobj = obj + "/" if key_check(module, gs, bucket, dirobj): module.exit_json(msg="Bucket %s and key %s already exists."% (bucket, obj), changed=False) else: From 5c6f375c1714b48de153c372cc379d5e6d93b01c Mon Sep 17 00:00:00 2001 From: Mike Putnam Date: Fri, 12 Dec 2014 13:29:51 -0600 Subject: [PATCH 167/250] Type fix on delete_snapshot docs. --- cloud/amazon/ec2_ami.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_ami.py b/cloud/amazon/ec2_ami.py index c50e5a88048..ab1f986356b 100644 --- a/cloud/amazon/ec2_ami.py +++ b/cloud/amazon/ec2_ami.py @@ -79,7 +79,7 @@ options: aliases: [] delete_snapshot: description: - - Whether or not to deleted an AMI while deregistering it. + - Whether or not to delete an AMI while deregistering it. 
required: false default: null aliases: [] From e1f90635af0e9ca09449fe47f94471bf9e4ffa5d Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 12 Dec 2014 12:08:03 -0800 Subject: [PATCH 168/250] Don't traceback if a gid is specified instead of a group name Fixes https://github.com/ansible/ansible/issues/9796 --- system/user.py | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/system/user.py b/system/user.py index 6fe20122e91..44dffba3b8d 100644 --- a/system/user.py +++ b/system/user.py @@ -447,21 +447,23 @@ class User(object): def group_exists(self,group): try: - if group.isdigit(): - if grp.getgrgid(int(group)): - return True - else: - if grp.getgrnam(group): - return True - except KeyError: - return False + # Try group as a gid first + grp.getgrgid(int(group)) + return True + except (ValueError, KeyError): + try: + grp.getgrnam(group) + return True + except KeyError: + return False - def group_info(self,group): + def group_info(self, group): if not self.group_exists(group): return False - if group.isdigit(): - return list(grp.getgrgid(group)) - else: + try: + # Try group as a gid first + return list(grp.getgrgid(int(group))) + except (ValueError, KeyError): return list(grp.getgrnam(group)) def get_groups_set(self, remove_existing=True): From 07b98c45df5dafcbe08eda24e790480d657c1719 Mon Sep 17 00:00:00 2001 From: kustodian Date: Sat, 13 Dec 2014 17:24:10 +0100 Subject: [PATCH 169/250] Fixed postgresql_db failing on Python 2.4 with --check This reverts commit 81cbdb6c8cf54c41ba2ee3330c968e2feea05a5c and adds ignoring of the SystemExit exception because of Python 2.4. 
--- database/postgresql/postgresql_db.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/database/postgresql/postgresql_db.py b/database/postgresql/postgresql_db.py index 32cc930cd98..695a550c402 100644 --- a/database/postgresql/postgresql_db.py +++ b/database/postgresql/postgresql_db.py @@ -281,15 +281,19 @@ def main(): elif state == "present": changed = not db_matches(cursor, db, owner, template, encoding, lc_collate, lc_ctype) - else: - if state == "absent": - changed = db_delete(cursor, db) + module.exit_json(changed=changed,db=db) - elif state == "present": - changed = db_create(cursor, db, owner, template, encoding, - lc_collate, lc_ctype) + if state == "absent": + changed = db_delete(cursor, db) + + elif state == "present": + changed = db_create(cursor, db, owner, template, encoding, + lc_collate, lc_ctype) except NotSupportedError, e: module.fail_json(msg=str(e)) + except SystemExit: + # Avoid catching this on Python 2.4 + raise except Exception, e: module.fail_json(msg="Database query failed: %s" % e) From 0fee1132ad408e34317ee60a6b5fd7f011d0aaee Mon Sep 17 00:00:00 2001 From: sysadmin75 Date: Sat, 13 Dec 2014 15:14:32 -0500 Subject: [PATCH 170/250] Fixes #9518 - "file state=directory" silently skips if it's currently a file --- files/file.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/files/file.py b/files/file.py index 7aa5e45d7bc..e154d6ad07f 100644 --- a/files/file.py +++ b/files/file.py @@ -240,6 +240,10 @@ def main(): tmp_file_args['path']=curpath changed = module.set_fs_attributes_if_different(tmp_file_args, changed) + # We already know prev_state is not 'absent', therefore it exists in some form. 
+ elif prev_state != 'directory': + module.fail_json(path=path, msg='%s already exists as a %s' % (path, prev_state)) + changed = module.set_fs_attributes_if_different(file_args, changed) if recurse: From 39cd64ec17d4dd964383a659695481602b956b53 Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Sun, 14 Dec 2014 21:54:30 +0100 Subject: [PATCH 171/250] Revert commit cbc417c, as the code is broken, see #438 LooseVersion expect a string, so filtering to return a int is incorrect. Thanks to Jeremy Brown for the analysis. --- system/hostname.py | 20 ++++---------------- 1 file changed, 4 insertions(+), 16 deletions(-) diff --git a/system/hostname.py b/system/hostname.py index 48311f07a96..03577cfa8c2 100644 --- a/system/hostname.py +++ b/system/hostname.py @@ -45,18 +45,6 @@ from distutils.version import LooseVersion from ansible.module_utils.basic import * -# wrap get_distribution_version in case it returns a string -def _get_distribution_version(): - distribution_version = get_distribution_version() - - if type(distribution_version) is str: - distribution_version = 0 - elif type(distribution_version) is None: - distribution_version = 0 - - return distribution_version - - class UnimplementedStrategy(object): def __init__(self, module): self.module = module @@ -353,7 +341,7 @@ class RedHat5Hostname(Hostname): class RedHatServerHostname(Hostname): platform = 'Linux' distribution = 'Red hat enterprise linux server' - distribution_version = _get_distribution_version() + distribution_version = get_distribution_version() if distribution_version and LooseVersion(distribution_version) >= LooseVersion("7"): strategy_class = FedoraStrategy else: @@ -362,7 +350,7 @@ class RedHatServerHostname(Hostname): class RedHatWorkstationHostname(Hostname): platform = 'Linux' distribution = 'Red hat enterprise linux workstation' - distribution_version = _get_distribution_version() + distribution_version = get_distribution_version() if distribution_version and 
LooseVersion(distribution_version) >= LooseVersion("7"): strategy_class = FedoraStrategy else: @@ -371,7 +359,7 @@ class RedHatWorkstationHostname(Hostname): class CentOSHostname(Hostname): platform = 'Linux' distribution = 'Centos' - distribution_version = _get_distribution_version() + distribution_version = get_distribution_version() if distribution_version and LooseVersion(distribution_version) >= LooseVersion("7"): strategy_class = FedoraStrategy else: @@ -380,7 +368,7 @@ class CentOSHostname(Hostname): class CentOSLinuxHostname(Hostname): platform = 'Linux' distribution = 'Centos linux' - distribution_version = _get_distribution_version() + distribution_version = get_distribution_version() if distribution_version and LooseVersion(distribution_version) >= LooseVersion("7"): strategy_class = FedoraStrategy else: From 28375aae7fcb4b89477e3eae6b1908a3fc169e72 Mon Sep 17 00:00:00 2001 From: Jeff Bradberry Date: Sun, 14 Dec 2014 20:48:36 -0500 Subject: [PATCH 172/250] Fix breakage in lineinfile check mode when target file does not exist. Similarly to https://github.com/ansible/ansible/issues/6182, checking of the file attributes should be avoided in check mode when the file didn't originally exist. Also, avoid creating parent directories in check mode. Fixes https://github.com/ansible/ansible/issues/9546 --- files/lineinfile.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/files/lineinfile.py b/files/lineinfile.py index ef73bde7b7f..b9fc628e10c 100644 --- a/files/lineinfile.py +++ b/files/lineinfile.py @@ -192,7 +192,7 @@ def present(module, dest, regexp, line, insertafter, insertbefore, create, if not create: module.fail_json(rc=257, msg='Destination %s does not exist !' 
% dest) destpath = os.path.dirname(dest) - if not os.path.exists(destpath): + if not os.path.exists(destpath) and not module.check_mode: os.makedirs(destpath) lines = [] else: @@ -282,6 +282,9 @@ def present(module, dest, regexp, line, insertafter, insertbefore, create, backupdest = module.backup_local(dest) write_changes(module, lines, dest) + if module.check_mode and not os.path.exists(dest): + module.exit_json(changed=changed, msg=msg, backup=backupdest) + msg, changed = check_file_attrs(module, changed, msg) module.exit_json(changed=changed, msg=msg, backup=backupdest) From 369eeb2666dacf4101db121c7ffb69950a7d3269 Mon Sep 17 00:00:00 2001 From: Jeff Gonzalez Date: Mon, 15 Dec 2014 10:18:31 -0600 Subject: [PATCH 173/250] Fixed formatting in documentation The example was using mixed shorthand and long form yaml (region: "us-east-1 disable_rollback=true" I modified the entire example to be long form. --- cloud/amazon/cloudformation.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cloud/amazon/cloudformation.py b/cloud/amazon/cloudformation.py index 72b970cd262..ff4a66373c0 100644 --- a/cloud/amazon/cloudformation.py +++ b/cloud/amazon/cloudformation.py @@ -100,7 +100,8 @@ tasks: cloudformation: stack_name: "ansible-cloudformation" state: "present" - region: "us-east-1 disable_rollback=true" + region: "us-east-1" + disable_rollback: true template: "files/cloudformation-example.json" template_parameters: KeyName: "jmartin" From 7350d2cc7f58337bad5abcc1ffd4ed78335d75f3 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 15 Dec 2014 13:34:03 -0800 Subject: [PATCH 174/250] Add error message from git to message returned from ansible Fixes #81 --- source_control/git.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/source_control/git.py b/source_control/git.py index 6b85783db20..9e333586890 100644 --- a/source_control/git.py +++ b/source_control/git.py @@ -443,19 +443,21 @@ def fetch(git_path, module, repo, dest, 
version, remote, bare): (rc, out1, err1) = module.run_command([git_path, 'fetch', remote, '+refs/heads/*:refs/heads/*'], cwd=dest) else: (rc, out1, err1) = module.run_command("%s fetch %s" % (git_path, remote), cwd=dest) - if rc != 0: - module.fail_json(msg="Failed to download remote objects and refs") out_acc.append(out1) err_acc.append(err1) + if rc != 0: + module.fail_json(msg="Failed to download remote objects and refs: %s %s" % + (''.join(out_acc), ''.join(err_acc))) if bare: (rc, out2, err2) = module.run_command([git_path, 'fetch', remote, '+refs/tags/*:refs/tags/*'], cwd=dest) else: (rc, out2, err2) = module.run_command("%s fetch --tags %s" % (git_path, remote), cwd=dest) - if rc != 0: - module.fail_json(msg="Failed to download remote objects and refs") out_acc.append(out2) err_acc.append(err2) + if rc != 0: + module.fail_json(msg="Failed to download remote objects and refs: %s %s" % + (''.join(out_acc), ''.join(err_acc))) return (rc, ''.join(out_acc), ''.join(err_acc)) From 2e8858216559ae351155dc87280337d1f05cbc99 Mon Sep 17 00:00:00 2001 From: zitterbacke Date: Tue, 16 Dec 2014 15:10:00 +0100 Subject: [PATCH 175/250] re-enable AIX password setting the AIX class uses a unsafe shell for setting the user password (containing a pipe in the command). This patch adopts to the new behavior of module_utils/basic.py (since somewhere around 1.7). besides it changes the qoutes for the echo command from double to single, because password-hashes contain $-signs and one would not have this variables expanded. 
--- system/user.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/system/user.py b/system/user.py index 44dffba3b8d..30ae29d30ae 100644 --- a/system/user.py +++ b/system/user.py @@ -262,12 +262,12 @@ class User(object): # select whether we dump additional debug info through syslog self.syslogging = False - def execute_command(self, cmd): + def execute_command(self, cmd, use_unsafe_shell=False): if self.syslogging: syslog.openlog('ansible-%s' % os.path.basename(__file__)) syslog.syslog(syslog.LOG_NOTICE, 'Command %s' % '|'.join(cmd)) - return self.module.run_command(cmd) + return self.module.run_command(cmd, use_unsafe_shell=use_unsafe_shell) def remove_user_userdel(self): cmd = [self.module.get_bin_path('userdel', True)] @@ -1367,11 +1367,11 @@ class AIX(User): # set password with chpasswd if self.password is not None: cmd = [] - cmd.append('echo "'+self.name+':'+self.password+'" |') + cmd.append('echo \''+self.name+':'+self.password+'\' |') cmd.append(self.module.get_bin_path('chpasswd', True)) cmd.append('-e') cmd.append('-c') - self.execute_command(' '.join(cmd)) + self.execute_command(' '.join(cmd), use_unsafe_shell=True) return (rc, out, err) @@ -1443,11 +1443,11 @@ class AIX(User): # set password with chpasswd if self.update_password == 'always' and self.password is not None and info[1] != self.password: cmd = [] - cmd.append('echo "'+self.name+':'+self.password+'" |') + cmd.append('echo \''+self.name+':'+self.password+'\' |') cmd.append(self.module.get_bin_path('chpasswd', True)) cmd.append('-e') cmd.append('-c') - (rc2, out2, err2) = self.execute_command(' '.join(cmd)) + (rc2, out2, err2) = self.execute_command(' '.join(cmd), use_unsafe_shell=True) else: (rc2, out2, err2) = (None, '', '') From 541070be4c3b9eb7b31608a7c2f2676bfe9bc0e7 Mon Sep 17 00:00:00 2001 From: Jonathan Mainguy Date: Tue, 16 Dec 2014 09:49:13 -0500 Subject: [PATCH 176/250] Fixed hostname for rhel5 python 2.4.3 --- system/hostname.py | 38 
++++++++++++++++++++------------------ 1 file changed, 20 insertions(+), 18 deletions(-) diff --git a/system/hostname.py b/system/hostname.py index 48311f07a96..aa562bad399 100644 --- a/system/hostname.py +++ b/system/hostname.py @@ -298,13 +298,14 @@ class OpenRCStrategy(GenericStrategy): def get_permanent_hostname(self): try: - f = open(self.HOSTNAME_FILE, 'r') - for line in f: - line = line.strip() - if line.startswith('hostname='): - return line[10:].strip('"') - except Exception, err: - self.module.fail_json(msg="failed to read hostname: %s" % str(err)) + try: + f = open(self.HOSTNAME_FILE, 'r') + for line in f: + line = line.strip() + if line.startswith('hostname='): + return line[10:].strip('"') + except Exception, err: + self.module.fail_json(msg="failed to read hostname: %s" % str(err)) finally: f.close() @@ -312,19 +313,20 @@ class OpenRCStrategy(GenericStrategy): def set_permanent_hostname(self, name): try: - f = open(self.HOSTNAME_FILE, 'r') - lines = [x.strip() for x in f] + try: + f = open(self.HOSTNAME_FILE, 'r') + lines = [x.strip() for x in f] - for i, line in enumerate(lines): - if line.startswith('hostname='): - lines[i] = 'hostname="%s"' % name - break - f.close() + for i, line in enumerate(lines): + if line.startswith('hostname='): + lines[i] = 'hostname="%s"' % name + break + f.close() - f = open(self.HOSTNAME_FILE, 'w') - f.write('\n'.join(lines) + '\n') - except Exception, err: - self.module.fail_json(msg="failed to update hostname: %s" % str(err)) + f = open(self.HOSTNAME_FILE, 'w') + f.write('\n'.join(lines) + '\n') + except Exception, err: + self.module.fail_json(msg="failed to update hostname: %s" % str(err)) finally: f.close() From 51ed13b8874a90ad3191301647901234eb10f02b Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 16 Dec 2014 11:50:41 -0800 Subject: [PATCH 177/250] Fix documentation build --- cloud/vmware/vsphere_guest.py | 12 ++++++------ windows/win_user.py | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff 
--git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index 378e3ecb9eb..86cc9f00fa7 100644 --- a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -65,18 +65,18 @@ options: default: null state: description: - - Indicate desired state of the vm. + - Indicate desired state of the vm. default: present choices: ['present', 'powered_off', 'absent', 'powered_on', 'restarted', 'reconfigured'] from_template: description: - - Specifies if the VM should be deployed from a template (cannot be ran with state) - default: no - choices: ['yes', 'no'] + - Specifies if the VM should be deployed from a template (cannot be ran with state) + default: no + choices: ['yes', 'no'] template_src: description: - - Name of the source template to deploy from - default: None + - Name of the source template to deploy from + default: None vm_disk: description: - A key, value list of disks and their sizes and which datastore to keep it in. diff --git a/windows/win_user.py b/windows/win_user.py index 6d3620fabbd..cd981916f1a 100644 --- a/windows/win_user.py +++ b/windows/win_user.py @@ -53,7 +53,7 @@ options: update_password: description: - C(always) will update passwords if they differ. C(on_create) will - only set the password for newly created users. + only set the password for newly created users. 
required: false choices: [ 'always', 'on_create' ] default: always From 9c6826e9286f2e683c583ff11ccd562bfb5eed8c Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 16 Dec 2014 13:54:23 -0800 Subject: [PATCH 178/250] Add text/json as a mimetype to try deserializing Fixes #503 --- network/basics/uri.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/network/basics/uri.py b/network/basics/uri.py index 95bf5c705fe..aac724a8f13 100644 --- a/network/basics/uri.py +++ b/network/basics/uri.py @@ -426,7 +426,8 @@ def main(): uresp[ukey] = value if 'content_type' in uresp: - if uresp['content_type'].startswith('application/json'): + if uresp['content_type'].startswith('application/json') or \ + uresp['content_type'].startswith('text/json'): try: js = json.loads(content) uresp['json'] = js From e174c9b4741a14dc5ab3cbe0506cef4010396155 Mon Sep 17 00:00:00 2001 From: Kale Franz Date: Wed, 17 Dec 2014 03:06:05 -0800 Subject: [PATCH 179/250] fixes #529 ec2_group module bug --- cloud/amazon/ec2_group.py | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/cloud/amazon/ec2_group.py b/cloud/amazon/ec2_group.py index 7d081a29620..edecc72ce34 100644 --- a/cloud/amazon/ec2_group.py +++ b/cloud/amazon/ec2_group.py @@ -114,11 +114,21 @@ except ImportError: sys.exit(1) +def make_rule_key(prefix, rule, group_id, cidr_ip): + """Creates a unique key for an individual group rule""" + if isinstance(rule, dict): + proto, from_port, to_port = (rule.get(x, None) for x in ('proto', 'from_port', 'to_port')) + else: # isinstance boto.ec2.securitygroup.IPPermissions + proto, from_port, to_port = (getattr(rule, x, None) for x in ('ip_protocol', 'from_port', 'to_port')) + + key = "%s-%s-%s-%s-%s-%s" % (prefix, proto, from_port, to_port, group_id, cidr_ip) + return key.lower().replace('-none', '-None') + + def addRulesToLookup(rules, prefix, dict): for rule in rules: for grant in rule.grants: - dict["%s-%s-%s-%s-%s-%s" % (prefix, 
rule.ip_protocol, rule.from_port, rule.to_port, - grant.group_id, grant.cidr_ip)] = rule + dict[make_rule_key(prefix, rule, grant.group_id, grant.cidr_ip)] = rule def get_target_from_rule(module, ec2, rule, name, group, groups, vpc_id): @@ -279,7 +289,7 @@ def main(): rule['to_port'] = None # If rule already exists, don't later delete it - ruleId = "%s-%s-%s-%s-%s-%s" % ('in', rule['proto'], rule['from_port'], rule['to_port'], group_id, ip) + ruleId = make_rule_key('in', rule, group_id, ip) if ruleId in groupRules: del groupRules[ruleId] # Otherwise, add new rule @@ -320,7 +330,7 @@ def main(): rule['to_port'] = None # If rule already exists, don't later delete it - ruleId = "%s-%s-%s-%s-%s-%s" % ('out', rule['proto'], rule['from_port'], rule['to_port'], group_id, ip) + ruleId = make_rule_key('out', rule, group_id, ip) if ruleId in groupRules: del groupRules[ruleId] # Otherwise, add new rule From 73172fae477fbb6307fd52c9b830b75ebc4fdc7e Mon Sep 17 00:00:00 2001 From: Kale Franz Date: Wed, 17 Dec 2014 09:01:50 -0800 Subject: [PATCH 180/250] #531 correction correct list comprehension for older versions of python (back to python 2.4) --- cloud/amazon/ec2_group.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/amazon/ec2_group.py b/cloud/amazon/ec2_group.py index edecc72ce34..59623e96d64 100644 --- a/cloud/amazon/ec2_group.py +++ b/cloud/amazon/ec2_group.py @@ -117,9 +117,9 @@ except ImportError: def make_rule_key(prefix, rule, group_id, cidr_ip): """Creates a unique key for an individual group rule""" if isinstance(rule, dict): - proto, from_port, to_port = (rule.get(x, None) for x in ('proto', 'from_port', 'to_port')) + proto, from_port, to_port = [rule.get(x, None) for x in ('proto', 'from_port', 'to_port')] else: # isinstance boto.ec2.securitygroup.IPPermissions - proto, from_port, to_port = (getattr(rule, x, None) for x in ('ip_protocol', 'from_port', 'to_port')) + proto, from_port, to_port = [getattr(rule, x, None) for x in 
('ip_protocol', 'from_port', 'to_port')] key = "%s-%s-%s-%s-%s-%s" % (prefix, proto, from_port, to_port, group_id, cidr_ip) return key.lower().replace('-none', '-None') From a942e5f85319c6516dbb9f2989cd55b4865b4518 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 17 Dec 2014 12:48:01 -0500 Subject: [PATCH 181/250] Revert "Merge pull request #384 from jhawkesworth/win_copy_file_template_ansible_modules_core_1" I missed some discussion in devel, these need more work before inclusion This reverts commit 58bfebb0477adda2a676381850038e0abf8c8f00, reversing changes made to 27dee77ca0449bdb338b2db89e044d1d9b553b4a. --- windows/win_copy.ps1 | 84 -------------------------------- windows/win_copy.py | 60 ----------------------- windows/win_file.ps1 | 105 ---------------------------------------- windows/win_file.py | 73 ---------------------------- windows/win_stat.ps1 | 6 ++- windows/win_template.py | 52 -------------------- 6 files changed, 4 insertions(+), 376 deletions(-) delete mode 100644 windows/win_copy.ps1 delete mode 100644 windows/win_copy.py delete mode 100644 windows/win_file.ps1 delete mode 100644 windows/win_file.py delete mode 100644 windows/win_template.py diff --git a/windows/win_copy.ps1 b/windows/win_copy.ps1 deleted file mode 100644 index 9ffdab85f03..00000000000 --- a/windows/win_copy.ps1 +++ /dev/null @@ -1,84 +0,0 @@ -#!powershell -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. 
If not, see . - -# WANT_JSON -# POWERSHELL_COMMON - -$params = Parse-Args $args; - -$src= Get-Attr $params "src" $FALSE; -If ($src -eq $FALSE) -{ - Fail-Json (New-Object psobject) "missing required argument: src"; -} - -$dest= Get-Attr $params "dest" $FALSE; -If ($dest -eq $FALSE) -{ - Fail-Json (New-Object psobject) "missing required argument: dest"; -} - -# seems to be supplied by the calling environment, but -# probably shouldn't be a test for it existing in the params. -# TODO investigate. -$original_basename = Get-Attr $params "original_basename" $FALSE; -If ($original_basename -eq $FALSE) -{ - Fail-Json (New-Object psobject) "missing required argument: original_basename "; -} - -$result = New-Object psobject @{ - changed = $FALSE -}; - -# if $dest is a dir, append $original_basename so the file gets copied with its intended name. -if (Test-Path $dest -PathType Container) -{ - $dest = Join-Path $dest $original_basename; -} - -If (Test-Path $dest) -{ - $dest_checksum = Get-FileChecksum ($dest); - $src_checksum = Get-FileChecksum ($src); - - If (! 
$src_checksum.CompareTo($dest_checksum)) - { - # New-Item -Force creates subdirs for recursive copies - New-Item -Force $dest -Type file; - Copy-Item -Path $src -Destination $dest -Force; - } - $dest_checksum = Get-FileChecksum ($dest); - If ( $src_checksum.CompareTo($dest_checksum)) - { - $result.changed = $TRUE; - } - Else - { - Fail-Json (New-Object psobject) "Failed to place file"; - } -} -Else -{ - New-Item -Force $dest -Type file; - Copy-Item -Path $src -Destination $dest; - $result.changed = $TRUE; -} - -$dest_checksum = Get-FileChecksum($dest); -$result.checksum = $dest_checksum; - -Exit-Json $result; diff --git a/windows/win_copy.py b/windows/win_copy.py deleted file mode 100644 index 7d0b49e5985..00000000000 --- a/windows/win_copy.py +++ /dev/null @@ -1,60 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -import os -import time - -DOCUMENTATION = ''' ---- -module: win_copy -version_added: "1.8" -short_description: Copies files to remote locations on windows hosts. -description: - - The M(win_copy) module copies a file on the local box to remote windows locations. -options: - src: - description: - - Local path to a file to copy to the remote server; can be absolute or relative. - If path is a directory, it is copied recursively. 
In this case, if path ends - with "/", only inside contents of that directory are copied to destination. - Otherwise, if it does not end with "/", the directory itself with all contents - is copied. This behavior is similar to Rsync. - required: false - default: null - aliases: [] - dest: - description: - - Remote absolute path where the file should be copied to. If src is a directory, - this must be a directory too. Use \\ for path separators. - required: true - default: null -author: Michael DeHaan -notes: - - The "win_copy" module recursively copy facility does not scale to lots (>hundreds) of files. - Instead, you may find it better to create files locally, perhaps using win_template, and - then use win_get_url to put them in the correct location. -''' - -EXAMPLES = ''' -# Example from Ansible Playbooks -- win_copy: src=/srv/myfiles/foo.conf dest=c:\\TEMP\\foo.conf - -''' - diff --git a/windows/win_file.ps1 b/windows/win_file.ps1 deleted file mode 100644 index 62ac81fc1ee..00000000000 --- a/windows/win_file.ps1 +++ /dev/null @@ -1,105 +0,0 @@ -#!powershell -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- -# WANT_JSON -# POWERSHELL_COMMON - -$params = Parse-Args $args; - -# path -$path = Get-Attr $params "path" $FALSE; -If ($path -eq $FALSE) -{ - $path = Get-Attr $params "dest" $FALSE; - If ($path -eq $FALSE) - { - $path = Get-Attr $params "name" $FALSE; - If ($path -eq $FALSE) - { - Fail-Json (New-Object psobject) "missing required argument: path"; - } - } -} - -# JH Following advice from Chris Church, only allow the following states -# in the windows version for now: -# state - file, directory, touch, absent -# (originally was: state - file, link, directory, hard, touch, absent) - -$state = Get-Attr $params "state" "file"; - -#$recurse = Get-Attr $params "recurse" "no"; - -# force - yes, no -# $force = Get-Attr $params "force" "no"; - -# result -$result = New-Object psobject @{ - changed = $FALSE -}; - -If ( $state -eq "touch" ) -{ - If(Test-Path $path) - { - (Get-ChildItem $path).LastWriteTime = Get-Date - } - Else - { - echo $null > $file - } - $result.changed = $TRUE; -} - -If (Test-Path $path) -{ - $fileinfo = Get-Item $path; - If ( $state -eq "absent" ) - { - Remove-Item -Recurse -Force $fileinfo; - $result.changed = $TRUE; - } - Else - { - # Only files have the .Directory attribute. - If ( $state -eq "directory" -and $fileinfo.Directory ) - { - Fail-Json (New-Object psobject) "path is not a directory"; - } - - # Only files have the .Directory attribute. 
- If ( $state -eq "file" -and -not $fileinfo.Directory ) - { - Fail-Json (New-Object psobject) "path is not a file"; - } - - } -} -Else -{ - If ( $state -eq "directory" ) - { - New-Item -ItemType directory -Path $path - $result.changed = $TRUE; - } - - If ( $state -eq "file" ) - { - Fail-Json (New-Object psobject) "path will not be created"; - } -} - -Exit-Json $result; diff --git a/windows/win_file.py b/windows/win_file.py deleted file mode 100644 index 6a218216617..00000000000 --- a/windows/win_file.py +++ /dev/null @@ -1,73 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - - -DOCUMENTATION = ''' ---- -module: win_file -version_added: "1.8" -short_description: Creates, touches or removes files or directories. -extends_documentation_fragment: files -description: - - Creates (empty) files, updates file modification stamps of existing files, - and can create or remove directories. - Unlike M(file), does not modify ownership, permissions or manipulate links. -notes: - - See also M(win_copy), M(win_template), M(copy), M(template), M(assemble) -requirements: [ ] -author: Michael DeHaan -options: - path: - description: - - 'path to the file being managed. 
Aliases: I(dest), I(name)' - required: true - default: [] - aliases: ['dest', 'name'] - state: - description: - - If C(directory), all immediate subdirectories will be created if they - do not exist. - If C(file), the file will NOT be created if it does not exist, see the M(copy) - or M(template) module if you want that behavior. If C(absent), - directories will be recursively deleted, and files will be removed. - If C(touch), an empty file will be created if the c(path) does not - exist, while an existing file or directory will receive updated file access and - modification times (similar to the way `touch` works from the command line). - required: false - default: file - choices: [ file, directory, touch, absent ] -''' - -EXAMPLES = ''' -# create a file -- win_file: path=C:\\temp\\foo.conf - -# touch a file (creates if not present, updates modification time if present) -- win_file: path=C:\\temp\\foo.conf state=touch - -# remove a file, if present -- win_file: path=C:\\temp\\foo.conf state=absent - -# create directory structure -- win_file: path=C:\\temp\\folder\\subfolder state=directory - -# remove directory structure -- win_file: path=C:\\temp state=absent -''' diff --git a/windows/win_stat.ps1 b/windows/win_stat.ps1 index 10101a62b30..4e4c55b2aa3 100644 --- a/windows/win_stat.ps1 +++ b/windows/win_stat.ps1 @@ -53,9 +53,11 @@ Else If ($get_md5 -and $result.stat.exists -and -not $result.stat.isdir) { - $hash = Get-FileChecksum($path); + $sp = new-object -TypeName System.Security.Cryptography.MD5CryptoServiceProvider; + $fp = [System.IO.File]::Open($path, [System.IO.Filemode]::Open, [System.IO.FileAccess]::Read); + $hash = [System.BitConverter]::ToString($sp.ComputeHash($fp)).Replace("-", "").ToLower(); + $fp.Dispose(); Set-Attr $result.stat "md5" $hash; - Set-Attr $result.stat "checksum" $hash; } Exit-Json $result; diff --git a/windows/win_template.py b/windows/win_template.py deleted file mode 100644 index 402702f93b2..00000000000 --- a/windows/win_template.py 
+++ /dev/null @@ -1,52 +0,0 @@ -# this is a virtual module that is entirely implemented server side - -DOCUMENTATION = ''' ---- -module: win_template -version_added: 1.8 -short_description: Templates a file out to a remote server. -description: - - Templates are processed by the Jinja2 templating language - (U(http://jinja.pocoo.org/docs/)) - documentation on the template - formatting can be found in the Template Designer Documentation - (U(http://jinja.pocoo.org/docs/templates/)). - - "Six additional variables can be used in templates: C(ansible_managed) - (configurable via the C(defaults) section of C(ansible.cfg)) contains a string - which can be used to describe the template name, host, modification time of the - template file and the owner uid, C(template_host) contains the node name of - the template's machine, C(template_uid) the owner, C(template_path) the - absolute path of the template, C(template_fullpath) is the absolute path of the - template, and C(template_run_date) is the date that the template was rendered. Note that including - a string that uses a date in the template will result in the template being marked 'changed' - each time." -options: - src: - description: - - Path of a Jinja2 formatted template on the local server. This can be a relative or absolute path. - required: true - default: null - aliases: [] - dest: - description: - - Location to render the template to on the remote machine. - required: true - default: null - backup: - description: - - Create a backup file including the timestamp information so you can get - the original file back if you somehow clobbered it incorrectly. - required: false - choices: [ "yes", "no" ] - default: "no" -notes: - - "templates are loaded with C(trim_blocks=True)." 
-requirements: [] -author: Michael DeHaan -''' - -EXAMPLES = ''' -# Example -- win_template: src=/mytemplates/foo.j2 dest=C:\\temp\\file.conf - - -''' From b650ad1671a0f0cd5f2f139407bc793b6f1fd6a3 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 17 Dec 2014 13:36:19 -0800 Subject: [PATCH 182/250] Make git's update parameter revert to its old behaviour and add new clone parameter to take its place. Fixes #426 Fixes https://github.com/ansible/ansible/issues/8630 --- source_control/git.py | 32 +++++++++++++++++++++----------- 1 file changed, 21 insertions(+), 11 deletions(-) diff --git a/source_control/git.py b/source_control/git.py index a2de91a15fe..d6d4b58f84e 100644 --- a/source_control/git.py +++ b/source_control/git.py @@ -36,9 +36,9 @@ options: required: true description: - Absolute path of where the repository should be checked out to. - This parameter is required, unless C(update) is set to C(no) - This change was made in version 1.8. Prior to this version, the - C(dest) parameter was always required. + This parameter is required, unless C(clone) is set to C(no) + This change was made in version 1.8.3. Prior to this version, + the C(dest) parameter was always required. version: required: false default: "HEAD" @@ -97,6 +97,13 @@ options: - Create a shallow clone with a history truncated to the specified number or revisions. The minimum possible value is C(1), otherwise ignored. 
+ clone: + required: false + default: "yes" + choices: [ "yes", "no" ] + version_added: "1.8.3" + description: + - If C(no), do not clone the repository if it does not exist locally update: required: false default: "yes" @@ -158,7 +165,7 @@ EXAMPLES = ''' - git: repo=ssh://git@github.com/mylogin/hello.git dest=/home/mylogin/hello # Example just ensuring the repo checkout exists -- git: repo=git://foosball.example.org/path/to/repo.git dest=/srv/checkout update=no +- git: repo=git://foosball.example.org/path/to/repo.git dest=/srv/checkout clone=no update=no ''' import re @@ -588,6 +595,7 @@ def main(): reference=dict(default=None), force=dict(default='yes', type='bool'), depth=dict(default=None, type='int'), + clone=dict(default='yes', type='bool'), update=dict(default='yes', type='bool'), accept_hostkey=dict(default='no', type='bool'), key_file=dict(default=None, required=False), @@ -607,6 +615,7 @@ def main(): force = module.params['force'] depth = module.params['depth'] update = module.params['update'] + allow_clone = module.params['clone'] bare = module.params['bare'] reference = module.params['reference'] git_path = module.params['executable'] or module.get_bin_path('git', True) @@ -614,8 +623,8 @@ def main(): ssh_opts = module.params['ssh_opts'] gitconfig = None - if not dest and update: - module.fail_json(msg="the destination directory must be specified unless update=no") + if not dest and allow_clone: + module.fail_json(msg="the destination directory must be specified unless clone=no") elif dest: dest = os.path.abspath(os.path.expanduser(dest)) if bare: @@ -651,11 +660,12 @@ def main(): before = None local_mods = False repo_updated = None - if gitconfig and not os.path.exists(gitconfig) or not gitconfig and not update: - # if there is no git configuration, do a clone operation unless the - # user requested no updates or we're doing a check mode test (in - # which case we do a ls-remote), otherwise clone the repo - if module.check_mode or not update: + if 
(dest and not os.path.exists(gitconfig)) or (not dest and not allow_clone): + # if there is no git configuration, do a clone operation unless: + # * the user requested no clone (they just want info) + # * we're doing a check mode test + # In those cases we do an ls-remote + if module.check_mode or not allow_clone: remote_head = get_remote_head(git_path, module, dest, version, repo, bare) module.exit_json(changed=True, before=before, after=remote_head) # there's no git config, so clone From dfe7f6c6d631d665232f1f033eba2e2fe5542364 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 17 Dec 2014 13:45:01 -0800 Subject: [PATCH 183/250] Probably would make clone a 1.9 feature rather than 1.8.3 --- source_control/git.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source_control/git.py b/source_control/git.py index d6d4b58f84e..3b627b2594e 100644 --- a/source_control/git.py +++ b/source_control/git.py @@ -101,7 +101,7 @@ options: required: false default: "yes" choices: [ "yes", "no" ] - version_added: "1.8.3" + version_added: "1.9" description: - If C(no), do not clone the repository if it does not exist locally update: From d564569910ea8a71b954c73807776a98c3a00153 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 17 Dec 2014 19:55:54 -0500 Subject: [PATCH 184/250] update to use connect_to_region to avoid errors with china --- cloud/amazon/cloudformation.py | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) diff --git a/cloud/amazon/cloudformation.py b/cloud/amazon/cloudformation.py index 1c8a9d6aca5..b382e3f05ff 100644 --- a/cloud/amazon/cloudformation.py +++ b/cloud/amazon/cloudformation.py @@ -130,13 +130,6 @@ except ImportError: sys.exit(1) -class Region: - def __init__(self, region): - '''connects boto to the region specified in the cloudformation template''' - self.name = region - self.endpoint = 'cloudformation.%s.amazonaws.com' % region - - def boto_exception(err): '''generic error message handler''' if hasattr(err, 
'error_message'): @@ -239,11 +232,10 @@ def main(): stack_outputs = {} try: - cf_region = Region(region) - cfn = boto.cloudformation.connection.CloudFormationConnection( - aws_access_key_id=aws_access_key, + cfn = boto.cloudformation.connect_to_region( + region, + aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_key, - region=cf_region, ) except boto.exception.NoAuthHandlerFound, e: module.fail_json(msg=str(e)) From 80d45c78a3dec3a2a048a33fad66b5f977072c0f Mon Sep 17 00:00:00 2001 From: Ingo Gottwald Date: Sun, 19 Oct 2014 22:15:48 +0200 Subject: [PATCH 185/250] Add support for new docker restart policies --- cloud/docker/docker.py | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index a65b65bf902..0902c4a296e 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -210,6 +210,20 @@ options: default: '' aliases: [] version_added: "1.8" + restart_policy: + description: + - Set the container restart policy + required: false + default: false + aliases: [] + version_added: "1.9" + restart_policy_retry: + description: + - Set the retry limit for container restart policy + required: false + default: false + aliases: [] + version_added: "1.9" author: Cove Schneider, Joshua Conner, Pavel Antonov requirements: [ "docker-py >= 0.3.0", "docker >= 0.10.0" ] @@ -665,6 +679,12 @@ class DockerManager: params['dns'] = self.module.params.get('dns') params['volumes_from'] = self.module.params.get('volumes_from') + if docker.utils.compare_version('1.14', self.client.version()['ApiVersion']) >= 0 and hasattr(docker, '__version__') and docker.__version__ >= '0.5.0': + if self.module.params.get('restart_policy') is not None: + params['restart_policy'] = { 'Name': self.module.params.get('restart_policy') } + if params['restart_policy']['Name'] == 'on-failure': + params['restart_policy']['MaximumRetryCount'] = self.module.params.get('restart_policy_retry') + for i in containers: 
self.client.start(i['Id'], **params) self.increment_counter('started') @@ -742,6 +762,8 @@ def main(): dns = dict(), detach = dict(default=True, type='bool'), state = dict(default='running', choices=['absent', 'present', 'running', 'stopped', 'killed', 'restarted']), + restart_policy = dict(default=None, choices=['always', 'on-failure', 'no']), + restart_policy_retry = dict(default=0, type='int'), debug = dict(default=False, type='bool'), privileged = dict(default=False, type='bool'), stdin_open = dict(default=False, type='bool'), From fa0fa9d2214e165904c69286e86632a2aecf47eb Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 18 Dec 2014 11:23:44 -0800 Subject: [PATCH 186/250] Make docker ver checks issue failures rather than silently ignoring Also: * make client version checks robust for two digit version pieces and alpha versions * consolidate version checking code --- cloud/docker/docker.py | 142 ++++++++++++++++++++++++++++++----------- 1 file changed, 106 insertions(+), 36 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 0902c4a296e..fb24bb0fe08 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -409,9 +409,60 @@ def get_split_image_tag(image): return resource, tag +def get_docker_py_versioninfo(): + if hasattr(docker, '__version__'): + # a '__version__' attribute was added to the module but not until + # after 0.3.0 was pushed to pypi. If it's there, use it. 
+ version = [] + for part in docker.__version__.split('.'): + try: + version.append(int(part)) + except ValueError: + for idx, char in enumerate(part): + if not char.isdigit(): + nondigit = part[idx:] + digit = part[:idx] + if digit: + version.append(int(digit)) + if nondigit: + version.append(nondigit) + elif hasattr(docker.Client, '_get_raw_response_socket'): + # HACK: if '__version__' isn't there, we check for the existence of + # `_get_raw_response_socket` in the docker.Client class, which was + # added in 0.3.0 + version = (0, 3, 0) + else: + # This is untrue but this module does not function with a version less + # than 0.3.0 so it's okay to lie here. + version = (0,) + + return version + +def check_dependencies(module): + """ + Ensure `docker-py` >= 0.3.0 is installed, and call module.fail_json with a + helpful error message if it isn't. + """ + if not HAS_DOCKER_PY: + module.fail_json(msg="`docker-py` doesn't seem to be installed, but is required for the Ansible Docker module.") + else: + versioninfo = get_docker_py_versioninfo() + if versioninfo < (0, 3, 0): + module.fail_json(msg="The Ansible Docker module requires `docker-py` >= 0.3.0.") + + class DockerManager: counters = {'created':0, 'started':0, 'stopped':0, 'killed':0, 'removed':0, 'restarted':0, 'pull':0} + _capabilities = set() + # Map optional parameters to minimum (docker-py version, server APIVersion) + # docker-py version is a tuple of ints because we have to compare them + # server APIVersion is passed to a docker-py function that takes strings + _cap_ver_req = { + 'dns': ((0, 3, 0), '1.10'), + 'volume_from': ((0, 3, 0), '1.10'), + 'restart_policy': ((0, 5, 0), '1.14'), + } def __init__(self, module): self.module = module @@ -466,6 +517,39 @@ class DockerManager: docker_api_version = module.params.get('docker_api_version') self.client = docker.Client(base_url=docker_url.geturl(), version=docker_api_version) + self.docker_py_versioninfo = get_docker_py_versioninfo() + + def 
_check_capabilties(self): + """ + Create a list of available capabilities + """ + api_version = self.client.version()['ApiVersion'] + for cap, req_vers in self._cap_ver_req.items(): + if (self.docker_py_versioninfo >= req_vers[0] and + docker.utils.compare_version(req_vers[1], api_version) >= 0): + self._capabilities.add(cap) + + def ensure_capability(self, capability): + """ + Some of the functionality this ansible module implements are only + available in newer versions of docker. Ensure that the capability + is available here. + """ + if not self._capabilities: + self._check_capabilties() + + if capability in self._capabilities: + return True + + api_version = self.client.version()['ApiVersion'] + self.module.fail_json(msg='Specifying the `%s` parameter requires' + ' docker-py: %s, docker server apiversion %s; found' + ' docker-py: %s, server: %s' % ( + capability, + '.'.join(self._cap_ver_req[capability][0]), + self._cap_ver_req[capability][1], + '.'.join(self.docker_py_versioninfo), + api_version)) def get_links(self, links): """ @@ -628,9 +712,11 @@ class DockerManager: 'tty': self.module.params.get('tty'), } - if docker.utils.compare_version('1.10', self.client.version()['ApiVersion']) < 0: - params['dns'] = self.module.params.get('dns') - params['volumes_from'] = self.module.params.get('volumes_from') + if params['dns'] is not None: + self.ensure_capability('dns') + + if params['volumes_from'] is not None: + self.ensure_capability('volumes_from') def do_create(count, params): results = [] @@ -675,15 +761,24 @@ class DockerManager: 'links': self.links, 'network_mode': self.module.params.get('net'), } - if docker.utils.compare_version('1.10', self.client.version()['ApiVersion']) >= 0 and hasattr(docker, '__version__') and docker.__version__ > '0.3.0': - params['dns'] = self.module.params.get('dns') - params['volumes_from'] = self.module.params.get('volumes_from') - if docker.utils.compare_version('1.14', self.client.version()['ApiVersion']) >= 0 and 
hasattr(docker, '__version__') and docker.__version__ >= '0.5.0': - if self.module.params.get('restart_policy') is not None: - params['restart_policy'] = { 'Name': self.module.params.get('restart_policy') } - if params['restart_policy']['Name'] == 'on-failure': - params['restart_policy']['MaximumRetryCount'] = self.module.params.get('restart_policy_retry') + optionals = [] + for optional_param in ('dns', 'volumes_from', 'restart_policy', 'restart_policy_retry'): + optionals[optional_param] = self.module.params.get(optional_param) + + if optionals['dns'] is not None: + self.ensure_capability('dns') + params['dns'] = optionals['dns'] + + if optionals['volumes_from'] is not None: + self.ensure_capability('volumes_from') + params['volumes_from'] = optionals['volumes_from'] + + if optionals['restart_policy'] is not None: + self.ensure_capability('restart_policy') + params['restart_policy'] = { 'Name': optionals['restart_policy'] } + if params['restart_policy']['Name'] == 'on-failure': + params['restart_policy']['MaximumRetryCount'] = optionals['restart_policy_retry'] for i in containers: self.client.start(i['Id'], **params) @@ -712,31 +807,6 @@ class DockerManager: self.increment_counter('restarted') -def check_dependencies(module): - """ - Ensure `docker-py` >= 0.3.0 is installed, and call module.fail_json with a - helpful error message if it isn't. - """ - if not HAS_DOCKER_PY: - module.fail_json(msg="`docker-py` doesn't seem to be installed, but is required for the Ansible Docker module.") - else: - HAS_NEW_ENOUGH_DOCKER_PY = False - if hasattr(docker, '__version__'): - # a '__version__' attribute was added to the module but not until - # after 0.3.0 was added pushed to pip. If it's there, use it. 
- if docker.__version__ >= '0.3.0': - HAS_NEW_ENOUGH_DOCKER_PY = True - else: - # HACK: if '__version__' isn't there, we check for the existence of - # `_get_raw_response_socket` in the docker.Client class, which was - # added in 0.3.0 - if hasattr(docker.Client, '_get_raw_response_socket'): - HAS_NEW_ENOUGH_DOCKER_PY = True - - if not HAS_NEW_ENOUGH_DOCKER_PY: - module.fail_json(msg="The Ansible Docker module requires `docker-py` >= 0.3.0.") - - def main(): module = AnsibleModule( argument_spec = dict( From 4172d445d3bd8699add652f3b7dc4693397f8d92 Mon Sep 17 00:00:00 2001 From: Maksim Losev Date: Sun, 14 Dec 2014 06:56:30 +0300 Subject: [PATCH 187/250] Add insecure_registry to docker-py pull method Starting from docker-py>=0.5.0 it is impossible to work with private registries based on HTTP. So we need additional parameter to allow pull from insecure registry Related to ansible/ansible#9111 --- cloud/docker/docker.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index fb24bb0fe08..d796c17407d 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -224,6 +224,13 @@ options: default: false aliases: [] version_added: "1.9" + insecure_registry: + description: + - Use insecure private registry by HTTP instead of HTTPS (needed for docker-py >= 0.5.0). 
+ required: false + default: false + aliases: [] + version_added: "1.9" author: Cove Schneider, Joshua Conner, Pavel Antonov requirements: [ "docker-py >= 0.3.0", "docker >= 0.10.0" ] @@ -743,7 +750,7 @@ class DockerManager: except: self.module.fail_json(msg="failed to login to the remote registry, check your username/password.") try: - self.client.pull(image, tag=tag) + self.client.pull(image, tag=tag, insecure_registry=self.module.params.get('insecure_registry')) except: self.module.fail_json(msg="failed to pull the specified image: %s" % resource) self.increment_counter('pull') @@ -840,7 +847,8 @@ def main(): tty = dict(default=False, type='bool'), lxc_conf = dict(default=None, type='list'), name = dict(default=None), - net = dict(default=None) + net = dict(default=None), + insecure_registry = dict(default=False, type='bool'), ) ) From f65d9ab79326a67430beac4ea4671f9a0e6e7097 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 18 Dec 2014 12:45:13 -0800 Subject: [PATCH 188/250] Make insecure_registry feature version check against the client API --- cloud/docker/docker.py | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index d796c17407d..0717e60d560 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -469,6 +469,8 @@ class DockerManager: 'dns': ((0, 3, 0), '1.10'), 'volume_from': ((0, 3, 0), '1.10'), 'restart_policy': ((0, 5, 0), '1.14'), + # Clientside only + 'insecure_registry': ((0, 5, 0), '0.0') } def __init__(self, module): @@ -536,11 +538,15 @@ class DockerManager: docker.utils.compare_version(req_vers[1], api_version) >= 0): self._capabilities.add(cap) - def ensure_capability(self, capability): + def ensure_capability(self, capability, fail=True): """ Some of the functionality this ansible module implements are only available in newer versions of docker. Ensure that the capability is available here. 
+ + If fail is set to False then return True or False depending on whether + we have the capability. Otherwise, simply fail and exit the module if + we lack the capability. """ if not self._capabilities: self._check_capabilties() @@ -548,6 +554,9 @@ class DockerManager: if capability in self._capabilities: return True + if not fail: + return False + api_version = self.client.version()['ApiVersion'] self.module.fail_json(msg='Specifying the `%s` parameter requires' ' docker-py: %s, docker server apiversion %s; found' @@ -725,6 +734,11 @@ class DockerManager: if params['volumes_from'] is not None: self.ensure_capability('volumes_from') + extra_params = {} + if self.module.params.get('insecure_registry'): + if self.ensure_capability('insecure_registry', fail=False): + extra_params['insecure_registry'] = self.module.params.get('insecure_registry') + def do_create(count, params): results = [] for _ in range(count): @@ -750,7 +764,7 @@ class DockerManager: except: self.module.fail_json(msg="failed to login to the remote registry, check your username/password.") try: - self.client.pull(image, tag=tag, insecure_registry=self.module.params.get('insecure_registry')) + self.client.pull(image, tag=tag, **extra_params) except: self.module.fail_json(msg="failed to pull the specified image: %s" % resource) self.increment_counter('pull') From c4f9366e913570e24f44e4f192741a28cb121af9 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 18 Dec 2014 13:43:55 -0800 Subject: [PATCH 189/250] Newstyle class --- cloud/docker/docker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 0717e60d560..f53819f2679 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -458,7 +458,7 @@ def check_dependencies(module): module.fail_json(msg="The Ansible Docker module requires `docker-py` >= 0.3.0.") -class DockerManager: +class DockerManager(object): counters = {'created':0, 'started':0, 'stopped':0, 'killed':0, 
'removed':0, 'restarted':0, 'pull':0} _capabilities = set() From c242de1a396614f8b37df2e687bd0f73332aa300 Mon Sep 17 00:00:00 2001 From: Rohan McGovern Date: Thu, 13 Nov 2014 08:14:31 +1000 Subject: [PATCH 190/250] git: clean up "fetch" method De-duplicate repetitive code checking the exit code. Include the stdout/stderr of the failed process in all cases. Remove the returned values because no caller uses them. Combine git commands where possible. There is no need to fetch branches and tags as two separate operations. --- source_control/git.py | 33 ++++++++++----------------------- 1 file changed, 10 insertions(+), 23 deletions(-) diff --git a/source_control/git.py b/source_control/git.py index 3b627b2594e..f3cb329faf1 100644 --- a/source_control/git.py +++ b/source_control/git.py @@ -453,33 +453,20 @@ def get_head_branch(git_path, module, dest, remote, bare=False): def fetch(git_path, module, repo, dest, version, remote, bare): ''' updates repo from remote sources ''' - out_acc = [] - err_acc = [] - (rc, out0, err0) = module.run_command([git_path, 'remote', 'set-url', remote, repo], cwd=dest) - if rc != 0: - module.fail_json(msg="Failed to set a new url %s for %s: %s" % (repo, remote, out0 + err0)) - if bare: - (rc, out1, err1) = module.run_command([git_path, 'fetch', remote, '+refs/heads/*:refs/heads/*'], cwd=dest) - else: - (rc, out1, err1) = module.run_command("%s fetch %s" % (git_path, remote), cwd=dest) - out_acc.append(out1) - err_acc.append(err1) - if rc != 0: - module.fail_json(msg="Failed to download remote objects and refs: %s %s" % - (''.join(out_acc), ''.join(err_acc))) + commands = [["set a new url %s for %s" % (repo, remote)], [git_path, 'remote', 'set-url', remote, repo]] + + fetch_str = 'download remote objects and refs' if bare: - (rc, out2, err2) = module.run_command([git_path, 'fetch', remote, '+refs/tags/*:refs/tags/*'], cwd=dest) + refspecs = ['+refs/heads/*:refs/heads/*', '+refs/tags/*:refs/tags/*'] + commands.append([fetch_str, [git_path, 
'fetch', remote] + refspecs]) else: - (rc, out2, err2) = module.run_command("%s fetch --tags %s" % (git_path, remote), cwd=dest) - out_acc.append(out2) - err_acc.append(err2) - if rc != 0: - module.fail_json(msg="Failed to download remote objects and refs: %s %s" % - (''.join(out_acc), ''.join(err_acc))) - - return (rc, ''.join(out_acc), ''.join(err_acc)) + commands.append([fetch_str, [git_path, 'fetch', '--tags']]) + for (label,command) in commands: + (rc,out,err) = module.run_command(command, cwd=dest) + if rc != 0: + module.fail_json(msg="Failed to %s: %s %s" % (label, out, err)) def submodules_fetch(git_path, module, remote, track_submodules, dest): changed = False From cf8504728490c352172156034d93a81a03ef8c39 Mon Sep 17 00:00:00 2001 From: Rohan McGovern Date: Fri, 21 Nov 2014 12:27:03 +1000 Subject: [PATCH 191/250] git: add 'refspec' argument This argument may be used to fetch additional refs beyond the default refs/heads/* and refs/tags/*. Checking out GitHub pull requests or Gerrit patch sets are two examples where this is useful. Without this, specifying version= with a SHA1 unreachable from any tag or branch can't work. --- source_control/git.py | 34 ++++++++++++++++++++++++++++++---- 1 file changed, 30 insertions(+), 4 deletions(-) diff --git a/source_control/git.py b/source_control/git.py index f3cb329faf1..dbea32d7d60 100644 --- a/source_control/git.py +++ b/source_control/git.py @@ -80,6 +80,17 @@ options: default: "origin" description: - Name of the remote. + refspec: + required: false + default: null + version_added: "1.9" + description: + - Add an additional refspec to be fetched. + If version is set to a I(SHA-1) not reachable from any branch + or tag, this option may be necessary to specify the ref containing + the I(SHA-1). + Uses the same syntax as the 'git fetch' command. + An example value could be "refs/meta/config". 
force: required: false default: "yes" @@ -166,6 +177,9 @@ EXAMPLES = ''' # Example just ensuring the repo checkout exists - git: repo=git://foosball.example.org/path/to/repo.git dest=/srv/checkout clone=no update=no + +# Example checkout a github repo and use refspec to fetch all pull requests +- git: repo=https://github.com/ansible/ansible-examples.git dest=/src/ansible-examples refspec=+refs/pull/*:refs/heads/* ''' import re @@ -279,7 +293,7 @@ def get_submodule_versions(git_path, module, dest, version='HEAD'): return submodules def clone(git_path, module, repo, dest, remote, depth, version, bare, - reference): + reference, refspec): ''' makes a new git repo if it does not already exist ''' dest_dirname = os.path.dirname(dest) try: @@ -304,6 +318,9 @@ def clone(git_path, module, repo, dest, remote, depth, version, bare, if remote != 'origin': module.run_command([git_path, 'remote', 'add', remote, repo], check_rc=True, cwd=dest) + if refspec: + module.run_command([git_path, 'fetch', remote, refspec], check_rc=True, cwd=dest) + def has_local_mods(module, git_path, dest, bare): if bare: return False @@ -451,7 +468,7 @@ def get_head_branch(git_path, module, dest, remote, bare=False): f.close() return branch -def fetch(git_path, module, repo, dest, version, remote, bare): +def fetch(git_path, module, repo, dest, version, remote, bare, refspec): ''' updates repo from remote sources ''' commands = [["set a new url %s for %s" % (repo, remote)], [git_path, 'remote', 'set-url', remote, repo]] @@ -459,9 +476,16 @@ def fetch(git_path, module, repo, dest, version, remote, bare): if bare: refspecs = ['+refs/heads/*:refs/heads/*', '+refs/tags/*:refs/tags/*'] + if refspec: + refspecs.append(refspec) commands.append([fetch_str, [git_path, 'fetch', remote] + refspecs]) else: commands.append([fetch_str, [git_path, 'fetch', '--tags']]) + if refspec: + # unlike in bare mode, there's no way to combine the + # additional refspec with the default git fetch behavior, + # so use two 
commands + commands.append([fetch_str, [git_path, 'fetch', remote, refspec]]) for (label,command) in commands: (rc,out,err) = module.run_command(command, cwd=dest) @@ -579,6 +603,7 @@ def main(): repo=dict(required=True, aliases=['name']), version=dict(default='HEAD'), remote=dict(default='origin'), + refspec=dict(default=None), reference=dict(default=None), force=dict(default='yes', type='bool'), depth=dict(default=None, type='int'), @@ -599,6 +624,7 @@ def main(): repo = module.params['repo'] version = module.params['version'] remote = module.params['remote'] + refspec = module.params['refspec'] force = module.params['force'] depth = module.params['depth'] update = module.params['update'] @@ -656,7 +682,7 @@ def main(): remote_head = get_remote_head(git_path, module, dest, version, repo, bare) module.exit_json(changed=True, before=before, after=remote_head) # there's no git config, so clone - clone(git_path, module, repo, dest, remote, depth, version, bare, reference) + clone(git_path, module, repo, dest, remote, depth, version, bare, reference, refspec) repo_updated = True elif not update: # Just return having found a repo already in the dest path @@ -690,7 +716,7 @@ def main(): if repo_updated is None: if module.check_mode: module.exit_json(changed=True, before=before, after=remote_head) - fetch(git_path, module, repo, dest, version, remote, bare) + fetch(git_path, module, repo, dest, version, remote, bare, refspec) repo_updated = True # switch to version specified regardless of whether From 0a297e54bac75d86cd33b0c299f95d82b643e0ab Mon Sep 17 00:00:00 2001 From: Chris Church Date: Fri, 19 Dec 2014 03:06:58 -0500 Subject: [PATCH 192/250] Change version added in win_user docs to 1.9 for updates that didn't make it into 1.8. 
--- windows/win_user.py | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/windows/win_user.py b/windows/win_user.py index cd981916f1a..82bcf0897ec 100644 --- a/windows/win_user.py +++ b/windows/win_user.py @@ -38,13 +38,13 @@ options: - Full name of the user required: false default: null - version_added: "1.8" + version_added: "1.9" description: description: - Description of the user required: false default: null - version_added: "1.8" + version_added: "1.9" password: description: - Optionally set the user's password to this (plain text) value. @@ -57,7 +57,7 @@ options: required: false choices: [ 'always', 'on_create' ] default: always - version_added: "1.8" + version_added: "1.9" password_expired: description: - C(yes) will require the user to change their password at next login. @@ -65,7 +65,7 @@ options: required: false choices: [ 'yes', 'no' ] default: null - version_added: "1.8" + version_added: "1.9" password_never_expires: description: - C(yes) will set the password to never expire. C(no) will allow the @@ -73,7 +73,7 @@ options: required: false choices: [ 'yes', 'no' ] default: null - version_added: "1.8" + version_added: "1.9" user_cannot_change_password: description: - C(yes) will prevent the user from changing their password. C(no) will @@ -81,7 +81,7 @@ options: required: false choices: [ 'yes', 'no' ] default: null - version_added: "1.8" + version_added: "1.9" account_disabled: description: - C(yes) will disable the user account. C(no) will clear the disabled @@ -89,14 +89,14 @@ options: required: false choices: [ 'yes', 'no' ] default: null - version_added: "1.8" + version_added: "1.9" account_locked: description: - C(no) will unlock the user account if locked. 
required: false choices: [ 'no' ] default: null - version_added: "1.8" + version_added: "1.9" groups: description: - Adds or removes the user from this comma-separated lis of groups, @@ -104,7 +104,7 @@ options: C(replace) and I(groups) is set to the empty string ('groups='), the user is removed from all groups. required: false - version_added: "1.8" + version_added: "1.9" groups_action: description: - If C(replace), the user is added as a member of each group in @@ -114,11 +114,11 @@ options: required: false choices: [ "replace", "add", "remove" ] default: "replace" - version_added: "1.8" + version_added: "1.9" state: description: - When C(present), creates or updates the user account. When C(absent), - removes the user account if it exists. When C(query) (new in 1.8), + removes the user account if it exists. When C(query) (new in 1.9), retrieves the user account details without making any changes. required: false choices: From ee324fc3ad2850b283461e4f62ee7a39e0b5de40 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 19 Dec 2014 08:55:41 -0800 Subject: [PATCH 193/250] Make documentation clear about update=no vs clone=no --- source_control/git.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/source_control/git.py b/source_control/git.py index 3b627b2594e..ee8b9a6d29a 100644 --- a/source_control/git.py +++ b/source_control/git.py @@ -165,6 +165,10 @@ EXAMPLES = ''' - git: repo=ssh://git@github.com/mylogin/hello.git dest=/home/mylogin/hello # Example just ensuring the repo checkout exists +- git: repo=git://foosball.example.org/path/to/repo.git dest=/srv/checkout update=no + +# Example just get information about the repository whether or not it has +# already been cloned locally. 
- git: repo=git://foosball.example.org/path/to/repo.git dest=/srv/checkout clone=no update=no ''' From 18c429d016218ea7ab8559f45773b53d39961f87 Mon Sep 17 00:00:00 2001 From: Peter Oliver Date: Wed, 17 Dec 2014 12:44:58 +0000 Subject: [PATCH 194/250] Fix appending to a user's group on Solaris Without this change, you get: AttributeError: 'set' object has no attribute 'extend' Tested on a Solaris 11.2 client with the included Python 2.6.8. --- system/user.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/user.py b/system/user.py index 30ae29d30ae..aaeee5683d3 100644 --- a/system/user.py +++ b/system/user.py @@ -1255,7 +1255,7 @@ class SunOS(User): cmd.append('-G') new_groups = groups if self.append: - new_groups.extend(current_groups) + new_groups.update(current_groups) cmd.append(','.join(new_groups)) if self.comment is not None and info[4] != self.comment: From 8a03af66083da993c47a970cde44ab8fc39744b6 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 22 Dec 2014 12:11:49 -0800 Subject: [PATCH 195/250] Fix typo in git refspec code. 
Change lists to tuples --- source_control/git.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/source_control/git.py b/source_control/git.py index 968ae77bcd1..f67abe32fa2 100644 --- a/source_control/git.py +++ b/source_control/git.py @@ -474,7 +474,7 @@ def get_head_branch(git_path, module, dest, remote, bare=False): def fetch(git_path, module, repo, dest, version, remote, bare, refspec): ''' updates repo from remote sources ''' - commands = [["set a new url %s for %s" % (repo, remote)], [git_path, 'remote', 'set-url', remote, repo]] + commands = [("set a new url %s for %s" % (repo, remote), [git_path, 'remote', 'set-url', remote, repo])] fetch_str = 'download remote objects and refs' @@ -482,14 +482,14 @@ def fetch(git_path, module, repo, dest, version, remote, bare, refspec): refspecs = ['+refs/heads/*:refs/heads/*', '+refs/tags/*:refs/tags/*'] if refspec: refspecs.append(refspec) - commands.append([fetch_str, [git_path, 'fetch', remote] + refspecs]) + commands.append((fetch_str, [git_path, 'fetch', remote] + refspecs)) else: - commands.append([fetch_str, [git_path, 'fetch', '--tags']]) + commands.append((fetch_str, [git_path, 'fetch', '--tags'])) if refspec: # unlike in bare mode, there's no way to combine the # additional refspec with the default git fetch behavior, # so use two commands - commands.append([fetch_str, [git_path, 'fetch', remote, refspec]]) + commands.append((fetch_str, [git_path, 'fetch', remote, refspec])) for (label,command) in commands: (rc,out,err) = module.run_command(command, cwd=dest) From 8f6ae92cf88beda287c6c11d8b4127239c3168e0 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 22 Dec 2014 15:08:25 -0800 Subject: [PATCH 196/250] git fetch --tags overwrites normal fetching with git < 1.8.x so do a normal fetch followed by using the refspec format for fetching tags --- source_control/git.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/source_control/git.py 
b/source_control/git.py index f67abe32fa2..44ebf06487a 100644 --- a/source_control/git.py +++ b/source_control/git.py @@ -484,12 +484,14 @@ def fetch(git_path, module, repo, dest, version, remote, bare, refspec): refspecs.append(refspec) commands.append((fetch_str, [git_path, 'fetch', remote] + refspecs)) else: - commands.append((fetch_str, [git_path, 'fetch', '--tags'])) + # unlike in bare mode, there's no way to combine the + # additional refspec with the default git fetch behavior, + # so use two commands + commands.append((fetch_str, [git_path, 'fetch', remote])) + refspecs = ['+refs/tags/*:refs/tags/*'] if refspec: - # unlike in bare mode, there's no way to combine the - # additional refspec with the default git fetch behavior, - # so use two commands - commands.append((fetch_str, [git_path, 'fetch', remote, refspec])) + refspecs.append(refspec) + commands.append((fetch_str, [git_path, 'fetch', remote] + refspecs)) for (label,command) in commands: (rc,out,err) = module.run_command(command, cwd=dest) From 82601fdc546bf9de70c71ccaf5ac323f918168f3 Mon Sep 17 00:00:00 2001 From: Jan Weitz Date: Wed, 24 Dec 2014 03:04:04 +0100 Subject: [PATCH 197/250] Fixes version check for docker-py --- cloud/docker/docker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index f53819f2679..00c805b8f85 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -443,7 +443,7 @@ def get_docker_py_versioninfo(): # than 0.3.0 so it's okay to lie here. version = (0,) - return version + return tuple(version) def check_dependencies(module): """ From ebf9b8c6e289024f46d18ed0cd567fac9156ac83 Mon Sep 17 00:00:00 2001 From: Jan Weitz Date: Wed, 24 Dec 2014 04:03:22 +0100 Subject: [PATCH 198/250] Fixes invalid dictionary access. 
--- cloud/docker/docker.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 00c805b8f85..03bf8a4af03 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -726,6 +726,8 @@ class DockerManager(object): 'name': self.module.params.get('name'), 'stdin_open': self.module.params.get('stdin_open'), 'tty': self.module.params.get('tty'), + 'dns': self.module.params.get('dns'), + 'volumes_from': self.module.params.get('volumes_from'), } if params['dns'] is not None: @@ -783,7 +785,7 @@ class DockerManager(object): 'network_mode': self.module.params.get('net'), } - optionals = [] + optionals = {} for optional_param in ('dns', 'volumes_from', 'restart_policy', 'restart_policy_retry'): optionals[optional_param] = self.module.params.get(optional_param) From 8ede9de895264d95dfd3cf977dc1281c7cac0efd Mon Sep 17 00:00:00 2001 From: Mark Phillips Date: Wed, 24 Dec 2014 12:39:48 +0000 Subject: [PATCH 199/250] vsphere_guest fix for KeyError: folder message --- cloud/vmware/vsphere_guest.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index 86cc9f00fa7..817421011d2 100644 --- a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -684,7 +684,7 @@ def create_vm(vsphere_client, module, esxi, resource_pool, cluster_name, guest, hfmor = dcprops.hostFolder._obj # virtualmachineFolder managed object reference - if vm_extra_config['folder']: + if vm_extra_config.get('folder'): if vm_extra_config['folder'] not in vsphere_client._get_managed_objects(MORTypes.Folder).values(): vsphere_client.disconnect() module.fail_json(msg="Cannot find folder named: %s" % vm_extra_config['folder']) From 55b85ddc46b8cff5d6756ce4d4c1ebdffba419bc Mon Sep 17 00:00:00 2001 From: FabioBatSilva Date: Tue, 23 Dec 2014 19:54:25 -0500 Subject: [PATCH 200/250] fix compatibility issues with python-apt < 0.7.9 --- packaging/os/apt.py | 7 
++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/packaging/os/apt.py b/packaging/os/apt.py index 6e2f26f7237..d5ae62d5320 100644 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -206,7 +206,12 @@ def package_status(m, pkgname, version, cache, state): package_is_installed = pkg.isInstalled if version: - avail_upgrades = fnmatch.filter((p.version for p in pkg.versions), version) + try: + avail_upgrades = fnmatch.filter((p.version for p in pkg.versions), version) + except AttributeError: + # assume older version of python-apt is installed + # apt.package.Package#versions require python-apt >= 0.7.9. + avail_upgrades = [] if package_is_installed: try: From a08165ca2e05808555077023ffaa1cd4a1499e48 Mon Sep 17 00:00:00 2001 From: fabios Date: Wed, 24 Dec 2014 17:55:44 -0500 Subject: [PATCH 201/250] use low-level apt_pkg.Package --- packaging/os/apt.py | 33 ++++++++++++++++++++++++++------- 1 file changed, 26 insertions(+), 7 deletions(-) diff --git a/packaging/os/apt.py b/packaging/os/apt.py index d5ae62d5320..e04b426fa86 100644 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -173,6 +173,29 @@ def package_split(pkgspec): else: return parts[0], None +def package_versions(pkgname, pkg, pkg_cache): + versions = {} + + try: + for p in pkg.versions: + versions[p.version] = p.version + except AttributeError: + # assume older version of python-apt is installed + # apt.package.Package#versions require python-apt >= 0.7.9. 
+ pkg_cache_list = filter(lambda p: p.Name == pkgname, pkg_cache.Packages) + + for pkg_cache in pkg_cache_list: + for p in pkg_cache.VersionList: + versions[p.VerStr] = p.VerStr + + return versions + +def package_version_compare(version, other_version): + try: + return apt_pkg.version_compare(version, other_version) + except AttributeError: + return apt_pkg.VersionCompare(version, other_version) + def package_status(m, pkgname, version, cache, state): try: # get the package from the cache, as well as the @@ -206,12 +229,8 @@ def package_status(m, pkgname, version, cache, state): package_is_installed = pkg.isInstalled if version: - try: - avail_upgrades = fnmatch.filter((p.version for p in pkg.versions), version) - except AttributeError: - # assume older version of python-apt is installed - # apt.package.Package#versions require python-apt >= 0.7.9. - avail_upgrades = [] + versions = package_versions(pkgname, pkg, cache._cache) + avail_upgrades = fnmatch.filter(versions, version) if package_is_installed: try: @@ -225,7 +244,7 @@ def package_status(m, pkgname, version, cache, state): # Only claim the package is upgradable if a candidate matches the version package_is_upgradable = False for candidate in avail_upgrades: - if pkg.versions[candidate] > pkg.installed: + if package_version_compare(versions[candidate], installed_version) > 0: package_is_upgradable = True break else: From 170457413dd179c3154a4184cbe12ad1ab14c86e Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 25 Dec 2014 00:25:02 -0800 Subject: [PATCH 202/250] Cleanup the old apt compatibility changes --- packaging/os/apt.py | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/packaging/os/apt.py b/packaging/os/apt.py index e04b426fa86..43a7d6b390b 100644 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -174,19 +174,14 @@ def package_split(pkgspec): return parts[0], None def package_versions(pkgname, pkg, pkg_cache): - versions = {} - try: - for p in 
pkg.versions: - versions[p.version] = p.version + versions = set(p.version for p in pkg.versions) except AttributeError: # assume older version of python-apt is installed # apt.package.Package#versions require python-apt >= 0.7.9. - pkg_cache_list = filter(lambda p: p.Name == pkgname, pkg_cache.Packages) - - for pkg_cache in pkg_cache_list: - for p in pkg_cache.VersionList: - versions[p.VerStr] = p.VerStr + pkg_cache_list = (p for p in pkg_cache.Packages if p.Name == pkgname) + pkg_versions = (p.VersionList for p in pkg_cache_list) + versions = set(p.VerStr for p in pkg_versions) return versions @@ -244,7 +239,7 @@ def package_status(m, pkgname, version, cache, state): # Only claim the package is upgradable if a candidate matches the version package_is_upgradable = False for candidate in avail_upgrades: - if package_version_compare(versions[candidate], installed_version) > 0: + if package_version_compare(candidate, installed_version) > 0: package_is_upgradable = True break else: From 3665c92856e0b9293895a018ed596aab8f680cf4 Mon Sep 17 00:00:00 2001 From: Lorin Hochstein Date: Thu, 25 Dec 2014 23:31:06 -0500 Subject: [PATCH 203/250] docker: fix volume[s]_from typo Code makes reference to volume_from instead of volumes_from. If volumes_from is passed as an argument, generates a KeyError. 
--- cloud/docker/docker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 03bf8a4af03..f71bad42e79 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -467,7 +467,7 @@ class DockerManager(object): # server APIVersion is passed to a docker-py function that takes strings _cap_ver_req = { 'dns': ((0, 3, 0), '1.10'), - 'volume_from': ((0, 3, 0), '1.10'), + 'volumes_from': ((0, 3, 0), '1.10'), 'restart_policy': ((0, 5, 0), '1.14'), # Clientside only 'insecure_registry': ((0, 5, 0), '0.0') From 50011f13d65c2d0a74a1e2ffc822bf13069fcc4d Mon Sep 17 00:00:00 2001 From: Johnny Robeson Date: Fri, 26 Dec 2014 00:02:36 -0500 Subject: [PATCH 204/250] [hostname] rename FedoraStratgy to SystemdStrategy --- system/hostname.py | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/system/hostname.py b/system/hostname.py index cd5738b43d5..2ca7479829b 100644 --- a/system/hostname.py +++ b/system/hostname.py @@ -28,6 +28,7 @@ requirements: [ hostname ] description: - Set system's hostname - Currently implemented on Debian, Ubuntu, Fedora, RedHat, openSUSE, Linaro, ScientificLinux, Arch, CentOS, AMI. + - Any distribution that uses systemd as their init system options: name: required: true @@ -232,9 +233,9 @@ class RedHatStrategy(GenericStrategy): # =========================================== -class FedoraStrategy(GenericStrategy): +class SystemdStrategy(GenericStrategy): """ - This is a Fedora family Hostname manipulation strategy class - it uses + This is a Systemd hostname manipulation strategy class - it uses the hostnamectl command. 
""" @@ -323,17 +324,17 @@ class OpenRCStrategy(GenericStrategy): class FedoraHostname(Hostname): platform = 'Linux' distribution = 'Fedora' - strategy_class = FedoraStrategy + strategy_class = SystemdStrategy class OpenSUSEHostname(Hostname): platform = 'Linux' distribution = 'Opensuse ' - strategy_class = FedoraStrategy + strategy_class = SystemdStrategy class ArchHostname(Hostname): platform = 'Linux' distribution = 'Arch' - strategy_class = FedoraStrategy + strategy_class = SystemdStrategy class RedHat5Hostname(Hostname): platform = 'Linux' @@ -345,7 +346,7 @@ class RedHatServerHostname(Hostname): distribution = 'Red hat enterprise linux server' distribution_version = get_distribution_version() if distribution_version and LooseVersion(distribution_version) >= LooseVersion("7"): - strategy_class = FedoraStrategy + strategy_class = SystemdStrategy else: strategy_class = RedHatStrategy @@ -354,7 +355,7 @@ class RedHatWorkstationHostname(Hostname): distribution = 'Red hat enterprise linux workstation' distribution_version = get_distribution_version() if distribution_version and LooseVersion(distribution_version) >= LooseVersion("7"): - strategy_class = FedoraStrategy + strategy_class = SystemdStrategy else: strategy_class = RedHatStrategy @@ -363,7 +364,7 @@ class CentOSHostname(Hostname): distribution = 'Centos' distribution_version = get_distribution_version() if distribution_version and LooseVersion(distribution_version) >= LooseVersion("7"): - strategy_class = FedoraStrategy + strategy_class = SystemdStrategy else: strategy_class = RedHatStrategy @@ -372,7 +373,7 @@ class CentOSLinuxHostname(Hostname): distribution = 'Centos linux' distribution_version = get_distribution_version() if distribution_version and LooseVersion(distribution_version) >= LooseVersion("7"): - strategy_class = FedoraStrategy + strategy_class = SystemdStrategy else: strategy_class = RedHatStrategy From 6fce4a9c3dac5704eeba2e17b4bf023f930542c2 Mon Sep 17 00:00:00 2001 From: fabios Date: Thu, 
25 Dec 2014 14:14:01 -0500 Subject: [PATCH 205/250] handle list of list for python-apt < 0.7.9 compatibility --- packaging/os/apt.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/packaging/os/apt.py b/packaging/os/apt.py index 43a7d6b390b..7eba3432e60 100644 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -180,7 +180,8 @@ def package_versions(pkgname, pkg, pkg_cache): # assume older version of python-apt is installed # apt.package.Package#versions require python-apt >= 0.7.9. pkg_cache_list = (p for p in pkg_cache.Packages if p.Name == pkgname) - pkg_versions = (p.VersionList for p in pkg_cache_list) + pkg_list_of_lists = (p.VersionList for p in pkg_cache_list) + pkg_versions = (p for l in pkg_list_of_lists for p in l) versions = set(p.VerStr for p in pkg_versions) return versions From d19f7c702613b73154c40b40ebf6bdcd06745615 Mon Sep 17 00:00:00 2001 From: fabios Date: Sat, 27 Dec 2014 15:30:56 -0500 Subject: [PATCH 206/250] older python-apt compatibility --- packaging/os/apt.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/packaging/os/apt.py b/packaging/os/apt.py index 7eba3432e60..d1101bc7b8b 100644 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -202,8 +202,12 @@ def package_status(m, pkgname, version, cache, state): ll_pkg = cache._cache[pkgname] # the low-level package object except KeyError: if state == 'install': - if cache.get_providing_packages(pkgname): - return False, True, False + try: + if cache.get_providing_packages(pkgname): + return False, True, False + except AttributeError: + # older python-apt providing packages cannot be used + pass m.fail_json(msg="No package matching '%s' is available" % pkgname) else: return False, False, False From b747d9411ac346835c4a38dbe3997c72c0e78137 Mon Sep 17 00:00:00 2001 From: fabios Date: Sat, 27 Dec 2014 21:19:00 -0500 Subject: [PATCH 207/250] improve fail message and use itertools chain --- packaging/os/apt.py | 12 ++++++------ 1 file changed, 6 
insertions(+), 6 deletions(-) diff --git a/packaging/os/apt.py b/packaging/os/apt.py index d1101bc7b8b..77f1e431b2f 100644 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -144,6 +144,7 @@ warnings.filterwarnings('ignore', "apt API not stable yet", FutureWarning) import os import datetime import fnmatch +import itertools # APT related constants APT_ENV_VARS = dict( @@ -180,9 +181,8 @@ def package_versions(pkgname, pkg, pkg_cache): # assume older version of python-apt is installed # apt.package.Package#versions require python-apt >= 0.7.9. pkg_cache_list = (p for p in pkg_cache.Packages if p.Name == pkgname) - pkg_list_of_lists = (p.VersionList for p in pkg_cache_list) - pkg_versions = (p for l in pkg_list_of_lists for p in l) - versions = set(p.VerStr for p in pkg_versions) + pkg_versions = (p.VersionList for p in pkg_cache_list) + versions = set(p.VerStr for p in itertools.chain(*pkg_versions)) return versions @@ -205,10 +205,10 @@ def package_status(m, pkgname, version, cache, state): try: if cache.get_providing_packages(pkgname): return False, True, False + m.fail_json(msg="No package matching '%s' is available" % pkgname) except AttributeError: - # older python-apt providing packages cannot be used - pass - m.fail_json(msg="No package matching '%s' is available" % pkgname) + # python-apt version too old to detect virtual packages + m.fail_json(msg="No package matching '%s' is available (python-apt version too old to detect virtual packages)" % pkgname) else: return False, False, False try: From 9ed842e2d0442112f3674dd4ab063e33d70c0254 Mon Sep 17 00:00:00 2001 From: fabios Date: Sun, 28 Dec 2014 12:52:48 -0500 Subject: [PATCH 208/250] mark as upgradable and let apt-get install deal with it --- packaging/os/apt.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/packaging/os/apt.py b/packaging/os/apt.py index 77f1e431b2f..ad1807f9c20 100644 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -208,7 +208,8 @@ def package_status(m, 
pkgname, version, cache, state): m.fail_json(msg="No package matching '%s' is available" % pkgname) except AttributeError: # python-apt version too old to detect virtual packages - m.fail_json(msg="No package matching '%s' is available (python-apt version too old to detect virtual packages)" % pkgname) + # mark as upgradable and let apt-get install deal with it + return False, True, False else: return False, False, False try: From ad7f5abf28b1b637c9b108a586ab7d22f8d031c4 Mon Sep 17 00:00:00 2001 From: Philip Misiowiec Date: Fri, 26 Sep 2014 19:02:25 -0700 Subject: [PATCH 209/250] Ability to detach an EBS volume from an EC2 instance --- cloud/amazon/ec2_vol.py | 31 +++++++++++++++++++++++++------ 1 file changed, 25 insertions(+), 6 deletions(-) diff --git a/cloud/amazon/ec2_vol.py b/cloud/amazon/ec2_vol.py index 7919a9ec47e..050863c22fb 100644 --- a/cloud/amazon/ec2_vol.py +++ b/cloud/amazon/ec2_vol.py @@ -168,16 +168,20 @@ EXAMPLES = ''' id: vol-XXXXXXXX state: absent +# Detach a volume +- ec2_vol: + id: vol-XXXXXXXX + instance: None + # List volumes for an instance - ec2_vol: instance: i-XXXXXX state: list # Create new volume using SSD storage -- local_action: - module: ec2_vol - instance: XXXXXX - volume_size: 50 +- ec2_vol: + instance: XXXXXX + volume_size: 50 volume_type: gp2 device_name: /dev/xvdf ''' @@ -261,15 +265,18 @@ def create_volume(module, ec2, zone): if iops: volume_type = 'io1' + if instance == 'None' or instance == '': + instance = None + # If no instance supplied, try volume creation based on module parameters. 
if name or id: - if not instance: - module.fail_json(msg = "If name or id is specified, instance must also be specified") if iops or volume_size: module.fail_json(msg = "Parameters are not compatible: [id or name] and [iops or volume_size]") volume = get_volume(module, ec2) if volume.attachment_state() is not None: + if instance is None: + return volume adata = volume.attach_data if adata.instance_id != instance: module.fail_json(msg = "Volume %s is already attached to another instance: %s" @@ -331,6 +338,13 @@ def attach_volume(module, ec2, volume, instance): except boto.exception.BotoServerError, e: module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) +def detach_volume(module, ec2): + vol = get_volume(module, ec2) + if not vol or vol.attachment_state() is None: + module.exit_json(changed=False) + else: + vol.detach() + module.exit_json(changed=True) def main(): argument_spec = ec2_argument_spec() @@ -362,6 +376,9 @@ def main(): snapshot = module.params.get('snapshot') state = module.params.get('state') + if instance == 'None' or instance == '': + instance = None + ec2 = ec2_connect(module) if state == 'list': @@ -428,6 +445,8 @@ def main(): volume = create_volume(module, ec2, zone) if instance: attach_volume(module, ec2, volume, inst) + else: + detach_volume(module, ec2) module.exit_json(volume_id=volume.id, device=device_name, volume_type=volume.type) # import module snippets From 71d1044b8b6daf13fdc6b917cf91744500b752a3 Mon Sep 17 00:00:00 2001 From: Philip Misiowiec Date: Tue, 30 Dec 2014 23:37:38 -0800 Subject: [PATCH 210/250] Adds tenancy state to returning json --- cloud/amazon/ec2.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index 050ed0b63f4..9bb0786753d 100644 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -604,6 +604,11 @@ def get_instance_info(inst): except AttributeError: instance_info['ebs_optimized'] = False + try: + instance_info['tenancy'] = getattr(inst, 
'placement_tenancy') + except AttributeError: + instance_info['tenancy'] = 'default' + return instance_info def boto_supports_associate_public_ip_address(ec2): From cfda942376fc7cb6eb3de75cdd8c423afd2deddd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lu=C3=ADs=20Guilherme=20F=2E=20Pereira?= Date: Wed, 1 Oct 2014 11:22:22 -0300 Subject: [PATCH 211/250] Allow ec2_lc to create EC2-Classic Launch Configs Removes default value from ec2_lc so it can create launch configurations valid on a EC2-Classic environment. AWS API will not accept a assign_public_ip when creating an ASG outside of VPC. --- cloud/amazon/ec2_lc.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/cloud/amazon/ec2_lc.py b/cloud/amazon/ec2_lc.py index 193a839c87d..30f532c9e4f 100644 --- a/cloud/amazon/ec2_lc.py +++ b/cloud/amazon/ec2_lc.py @@ -93,7 +93,6 @@ options: description: - Used for Auto Scaling groups that launch instances into an Amazon Virtual Private Cloud. Specifies whether to assign a public IP address to each instance launched in a Amazon VPC. 
required: false - default: false aliases: [] version_added: "1.8" ramdisk_id: @@ -255,7 +254,7 @@ def main(): ebs_optimized=dict(default=False, type='bool'), associate_public_ip_address=dict(type='bool'), instance_monitoring=dict(default=False, type='bool'), - assign_public_ip=dict(default=False, type='bool') + assign_public_ip=dict(type='bool') ) ) From 6237dab4cf3e2442403646825b8f6f9e72e40b7b Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 6 Jan 2015 10:06:50 -0500 Subject: [PATCH 212/250] fix for when state=directory, follow=yes and target is symlink to directory --- files/file.py | 35 +++++++++++++++++++++++------------ 1 file changed, 23 insertions(+), 12 deletions(-) diff --git a/files/file.py b/files/file.py index e154d6ad07f..46185f29215 100644 --- a/files/file.py +++ b/files/file.py @@ -103,6 +103,23 @@ EXAMPLES = ''' ''' + +def get_state(path): + ''' Find out current state ''' + + if os.path.lexists(path): + if os.path.islink(path): + return 'link' + elif os.path.isdir(path): + return 'directory' + elif os.stat(path).st_nlink > 1: + return 'hard' + else: + # could be many other things, but defaulting to file + return 'file' + + return 'absent' + def main(): module = AnsibleModule( @@ -143,18 +160,7 @@ def main(): pass module.exit_json(path=path, changed=False, appears_binary=appears_binary) - # Find out current state - prev_state = 'absent' - if os.path.lexists(path): - if os.path.islink(path): - prev_state = 'link' - elif os.path.isdir(path): - prev_state = 'directory' - elif os.stat(path).st_nlink > 1: - prev_state = 'hard' - else: - # could be many other things, but defaulting to file - prev_state = 'file' + prev_state = get_state(path) # state should default to file, but since that creates many conflicts, # default to 'current' when it exists. 
@@ -220,6 +226,11 @@ def main(): module.exit_json(path=path, changed=changed) elif state == 'directory': + + if follow and prev_state == 'link': + path = os.readlink(path) + prev_state = get_state(path) + if prev_state == 'absent': if module.check_mode: module.exit_json(changed=True) From c1eb7a4c1d41556c53dbcca597ca78ab964bca0d Mon Sep 17 00:00:00 2001 From: Vasyl Kaigorodov Date: Tue, 6 Jan 2015 20:44:09 +0100 Subject: [PATCH 213/250] gce_net - creating firewall rule, src_range value seems to get lost or set to empty string -- fixes #252 --- cloud/google/gce_net.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/google/gce_net.py b/cloud/google/gce_net.py index 102a73f2bd1..10592d20033 100644 --- a/cloud/google/gce_net.py +++ b/cloud/google/gce_net.py @@ -156,7 +156,7 @@ def main(): ipv4_range = dict(), fwname = dict(), name = dict(), - src_range = dict(), + src_range = dict(type='list'), src_tags = dict(type='list'), state = dict(default='present'), service_account_email = dict(), From 53404c786aa45e88cfd572ada353c921b8d367ec Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 6 Jan 2015 16:06:45 -0500 Subject: [PATCH 214/250] added version added for new template options --- cloud/vmware/vsphere_guest.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index 817421011d2..8ad7df41dea 100644 --- a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -69,11 +69,13 @@ options: default: present choices: ['present', 'powered_off', 'absent', 'powered_on', 'restarted', 'reconfigured'] from_template: + version_added: "1.9" description: - Specifies if the VM should be deployed from a template (cannot be ran with state) default: no choices: ['yes', 'no'] template_src: + version_added: "1.9" description: - Name of the source template to deploy from default: None From 2973bac72f04454014b0d4453de6065e68aa74c1 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 7 Jan 
2015 08:45:55 -0800 Subject: [PATCH 215/250] Clarify documented behaviour of user module's ssh_key_file parameter Fixes #9873 --- system/user.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/system/user.py b/system/user.py index aaeee5683d3..62dfb271e2d 100644 --- a/system/user.py +++ b/system/user.py @@ -153,10 +153,11 @@ options: present on target host. ssh_key_file: required: false - default: $HOME/.ssh/id_rsa + default: .ssh/id_rsa version_added: "0.9" description: - - Optionally specify the SSH key filename. + - Optionally specify the SSH key filename. If this is a relative + filename then it will be relative to the user's home directory. ssh_key_comment: required: false default: ansible-generated @@ -189,8 +190,8 @@ EXAMPLES = ''' # Remove the user 'johnd' - user: name=johnd state=absent remove=yes -# Create a 2048-bit SSH key for user jsmith -- user: name=jsmith generate_ssh_key=yes ssh_key_bits=2048 +# Create a 2048-bit SSH key for user jsmith in ~jsmith/.ssh/id_rsa +- user: name=jsmith generate_ssh_key=yes ssh_key_bits=2048 ssh_key_file=.ssh/id_rsa ''' import os From 78cacd0c2270feae0c03624f2e784fca3985c865 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 7 Jan 2015 13:16:53 -0500 Subject: [PATCH 216/250] fix for allowing permissions on hard links and soft links + follow=yes --- files/file.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/files/file.py b/files/file.py index 002776fd6ff..35bb52ab1e3 100644 --- a/files/file.py +++ b/files/file.py @@ -218,7 +218,15 @@ def main(): module.exit_json(path=path, changed=False) elif state == 'file': + if state != prev_state: + if follow and prev_state == 'link': + # follow symlink and operate on original + path = os.readlink(path) + prev_state = get_state(path) + file_args['path'] = path + + if prev_state not in ['file','hard']: # file is not absent and any other state is a conflict module.fail_json(path=path, msg='file (%s) is %s, cannot continue' % (path, prev_state)) 
From 242aa9f81b4a54fe4cf4da3c84e802e83c254b2c Mon Sep 17 00:00:00 2001 From: James Martin Date: Tue, 6 Jan 2015 14:04:03 -0500 Subject: [PATCH 217/250] vpc_zone_identifier must be a csv string when an asg is updated. --- cloud/amazon/ec2_asg.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cloud/amazon/ec2_asg.py b/cloud/amazon/ec2_asg.py index 8c9661706b0..6e5d3508cb8 100644 --- a/cloud/amazon/ec2_asg.py +++ b/cloud/amazon/ec2_asg.py @@ -326,6 +326,8 @@ def create_autoscaling_group(connection, module): for attr in ASG_ATTRIBUTES: if module.params.get(attr): module_attr = module.params.get(attr) + if attr == 'vpc_zone_identifier': + module_attr = ','.join(module_attr) group_attr = getattr(as_group, attr) # we do this because AWS and the module may return the same list # sorted differently From a07873d6a39dee7a26cae07b1e4619660a17f5db Mon Sep 17 00:00:00 2001 From: Bruce Pennypacker Date: Thu, 8 Jan 2015 16:26:22 +0000 Subject: [PATCH 218/250] Added support for 'REQUIRE SSL' grant option --- database/mysql/mysql_user.py | 29 ++++++++++++++++++++++++----- 1 file changed, 24 insertions(+), 5 deletions(-) diff --git a/database/mysql/mysql_user.py b/database/mysql/mysql_user.py index e160fcb68f6..68d6f031490 100644 --- a/database/mysql/mysql_user.py +++ b/database/mysql/mysql_user.py @@ -117,6 +117,9 @@ EXAMPLES = """ # Creates database user 'bob' and password '12345' with all database privileges and 'WITH GRANT OPTION' - mysql_user: name=bob password=12345 priv=*.*:ALL,GRANT state=present +# Modify user Bob to require SSL connections. Note that REQUIRESSL is a special privilege that should only apply to *.* by itself. +- mysql_user: name=bob append=true priv=*.*:REQUIRESSL state=present + # Ensure no user named 'sally' exists, also passing in the auth credentials. 
- mysql_user: login_user=root login_password=123456 name=sally state=absent @@ -159,7 +162,7 @@ VALID_PRIVS = frozenset(('CREATE', 'DROP', 'GRANT', 'GRANT OPTION', 'EXECUTE', 'FILE', 'CREATE TABLESPACE', 'CREATE USER', 'PROCESS', 'PROXY', 'RELOAD', 'REPLICATION CLIENT', 'REPLICATION SLAVE', 'SHOW DATABASES', 'SHUTDOWN', - 'SUPER', 'ALL', 'ALL PRIVILEGES', 'USAGE',)) + 'SUPER', 'ALL', 'ALL PRIVILEGES', 'USAGE', 'REQUIRESSL')) class InvalidPrivsError(Exception): pass @@ -261,6 +264,8 @@ def privileges_get(cursor, user,host): privileges = [ pick(x) for x in privileges] if "WITH GRANT OPTION" in res.group(4): privileges.append('GRANT') + if "REQUIRE SSL" in res.group(4): + privileges.append('REQUIRESSL') db = res.group(2) output[db] = privileges return output @@ -294,6 +299,11 @@ def privileges_unpack(priv): if '*.*' not in output: output['*.*'] = ['USAGE'] + # if we are only specifying something like REQUIRESSL in *.* we still need + # to add USAGE as a privilege to avoid syntax errors + if priv.find('REQUIRESSL') != -1 and 'USAGE' not in output['*.*']: + output['*.*'].append('USAGE') + return output def privileges_revoke(cursor, user,host,db_table,grant_option): @@ -307,19 +317,28 @@ def privileges_revoke(cursor, user,host,db_table,grant_option): query = ["REVOKE ALL PRIVILEGES ON %s" % mysql_quote_identifier(db_table, 'table')] query.append("FROM %s@%s") query = ' '.join(query) - cursor.execute(query, (user, host)) + try: + cursor.execute(query, (user, host)) + except Exception, e: + raise Exception("%s. 
Query=\"%s\"" % (str(e), query % (user, host))) def privileges_grant(cursor, user,host,db_table,priv): # Escape '%' since mysql db.execute uses a format string and the # specification of db and table often use a % (SQL wildcard) db_table = db_table.replace('%', '%%') - priv_string = ",".join(filter(lambda x: x != 'GRANT', priv)) + priv_string = ",".join(filter(lambda x: x not in [ 'GRANT', 'REQUIRESSL' ], priv)) query = ["GRANT %s ON %s" % (priv_string, mysql_quote_identifier(db_table, 'table'))] query.append("TO %s@%s") if 'GRANT' in priv: - query.append("WITH GRANT OPTION") + query.append(" WITH GRANT OPTION") + if 'REQUIRESSL' in priv: + query.append(" REQUIRE SSL") query = ' '.join(query) - cursor.execute(query, (user, host)) + try: + cursor.execute(query, (user, host)) + except Exception, e: + raise Exception("%s. Query=\"%s\"" % (str(e), query % (user, host))) + def strip_quotes(s): From 272bb1fa63cc869e36b4830b8094195ba4999297 Mon Sep 17 00:00:00 2001 From: Bruce Pennypacker Date: Thu, 8 Jan 2015 21:41:15 +0000 Subject: [PATCH 219/250] requested changes --- database/mysql/mysql_user.py | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) diff --git a/database/mysql/mysql_user.py b/database/mysql/mysql_user.py index 68d6f031490..7d4777fb831 100644 --- a/database/mysql/mysql_user.py +++ b/database/mysql/mysql_user.py @@ -317,10 +317,7 @@ def privileges_revoke(cursor, user,host,db_table,grant_option): query = ["REVOKE ALL PRIVILEGES ON %s" % mysql_quote_identifier(db_table, 'table')] query.append("FROM %s@%s") query = ' '.join(query) - try: - cursor.execute(query, (user, host)) - except Exception, e: - raise Exception("%s. 
Query=\"%s\"" % (str(e), query % (user, host))) + cursor.execute(query, (user, host)) def privileges_grant(cursor, user,host,db_table,priv): # Escape '%' since mysql db.execute uses a format string and the @@ -330,16 +327,11 @@ def privileges_grant(cursor, user,host,db_table,priv): query = ["GRANT %s ON %s" % (priv_string, mysql_quote_identifier(db_table, 'table'))] query.append("TO %s@%s") if 'GRANT' in priv: - query.append(" WITH GRANT OPTION") + query.append("WITH GRANT OPTION") if 'REQUIRESSL' in priv: - query.append(" REQUIRE SSL") + query.append("REQUIRE SSL") query = ' '.join(query) - try: - cursor.execute(query, (user, host)) - except Exception, e: - raise Exception("%s. Query=\"%s\"" % (str(e), query % (user, host))) - - + cursor.execute(query, (user, host)) def strip_quotes(s): """ Remove surrounding single or double quotes From bcfba0c05098696b6e770335870a9c22792fec38 Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Mon, 12 Jan 2015 01:02:29 +0100 Subject: [PATCH 220/250] Remove unused code There is no call to yum_base using 'cachedir' argument, so while it work fine from a cursory look, that's useless code, and so should be removed to clarify the code. 
--- packaging/os/yum.py | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/packaging/os/yum.py b/packaging/os/yum.py index 73fbb699e75..65d5b43b07c 100644 --- a/packaging/os/yum.py +++ b/packaging/os/yum.py @@ -149,21 +149,13 @@ def log(msg): syslog.openlog('ansible-yum', 0, syslog.LOG_USER) syslog.syslog(syslog.LOG_NOTICE, msg) -def yum_base(conf_file=None, cachedir=False): +def yum_base(conf_file=None): my = yum.YumBase() my.preconf.debuglevel=0 my.preconf.errorlevel=0 if conf_file and os.path.exists(conf_file): my.preconf.fn = conf_file - if cachedir or os.geteuid() != 0: - if hasattr(my, 'setCacheDir'): - my.setCacheDir() - else: - cachedir = yum.misc.getCacheDir() - my.repos.setCacheDir(cachedir) - my.conf.cache = 0 - return my def install_yum_utils(module): From 826d313aada2157742ba327a40ffa3749739c1a0 Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Mon, 12 Jan 2015 19:08:22 +0100 Subject: [PATCH 221/250] Do not use echo shell builtin for password Using \t in a password may result in a different password being set : $ echo 'a\ta' a a Problem report originally found by Pilou- ( https://github.com/ansible/ansible-modules-extras/pull/198 ) --- system/user.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/system/user.py b/system/user.py index 62dfb271e2d..e1fa7f203aa 100644 --- a/system/user.py +++ b/system/user.py @@ -263,12 +263,12 @@ class User(object): # select whether we dump additional debug info through syslog self.syslogging = False - def execute_command(self, cmd, use_unsafe_shell=False): + def execute_command(self, cmd, use_unsafe_shell=False, data=None): if self.syslogging: syslog.openlog('ansible-%s' % os.path.basename(__file__)) syslog.syslog(syslog.LOG_NOTICE, 'Command %s' % '|'.join(cmd)) - return self.module.run_command(cmd, use_unsafe_shell=use_unsafe_shell) + return self.module.run_command(cmd, use_unsafe_shell=use_unsafe_shell, data=data) def remove_user_userdel(self): cmd = 
[self.module.get_bin_path('userdel', True)] @@ -1368,11 +1368,10 @@ class AIX(User): # set password with chpasswd if self.password is not None: cmd = [] - cmd.append('echo \''+self.name+':'+self.password+'\' |') cmd.append(self.module.get_bin_path('chpasswd', True)) cmd.append('-e') cmd.append('-c') - self.execute_command(' '.join(cmd), use_unsafe_shell=True) + self.execute_command(' '.join(cmd), data="%s:%s" % (self.name, self.password)) return (rc, out, err) @@ -1444,11 +1443,10 @@ class AIX(User): # set password with chpasswd if self.update_password == 'always' and self.password is not None and info[1] != self.password: cmd = [] - cmd.append('echo \''+self.name+':'+self.password+'\' |') cmd.append(self.module.get_bin_path('chpasswd', True)) cmd.append('-e') cmd.append('-c') - (rc2, out2, err2) = self.execute_command(' '.join(cmd), use_unsafe_shell=True) + (rc2, out2, err2) = self.execute_command(' '.join(cmd), data="%s:%s" % (self.name, self.password)) else: (rc2, out2, err2) = (None, '', '') From fbb9dcc69a33a2051502ef3cb1a43b3e5a97a2d7 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 12 Jan 2015 14:36:57 -0800 Subject: [PATCH 222/250] Also catch mysql errors so we can give the error message back through json rather than tracebacking --- database/mysql/mysql_user.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/database/mysql/mysql_user.py b/database/mysql/mysql_user.py index 7d4777fb831..3590fb8e640 100644 --- a/database/mysql/mysql_user.py +++ b/database/mysql/mysql_user.py @@ -498,16 +498,14 @@ def main(): if user_exists(cursor, user, host): try: changed = user_mod(cursor, user, host, password, priv, append_privs) - except SQLParseError, e: - module.fail_json(msg=str(e)) - except InvalidPrivsError, e: + except (SQLParseError, InvalidPrivsError, MySQLdb.Error), e: module.fail_json(msg=str(e)) else: if password is None: module.fail_json(msg="password parameter required when adding a user") try: changed = user_add(cursor, 
user, host, password, priv) - except SQLParseError, e: + except (SQLParseError, InvalidPrivsError, MySQLdb.Error), e: module.fail_json(msg=str(e)) elif state == "absent": if user_exists(cursor, user, host): From d2ae2e6cc652ce082dd3b8522608407e12d26146 Mon Sep 17 00:00:00 2001 From: Alex Clifford Date: Wed, 14 Jan 2015 12:12:02 +1100 Subject: [PATCH 223/250] Fix slight typo in doco --- cloud/amazon/rds_subnet_group.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/amazon/rds_subnet_group.py b/cloud/amazon/rds_subnet_group.py index bba6cd86872..9731154f77c 100644 --- a/cloud/amazon/rds_subnet_group.py +++ b/cloud/amazon/rds_subnet_group.py @@ -79,8 +79,8 @@ EXAMPLES = ''' - subnet-aaaaaaaa - subnet-bbbbbbbb -# Remove a parameter group -- rds_param_group: +# Remove a subnet group +- rds_subnet_group: state: absent name: norwegian-blue ''' From 68b5b7be72fe3387f5fae954d391802944f07c40 Mon Sep 17 00:00:00 2001 From: Tim G Date: Wed, 14 Jan 2015 13:15:38 +1000 Subject: [PATCH 224/250] distutils is not available on some non-Linux OS's --- system/service.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/system/service.py b/system/service.py index 275bac900a9..fea5722710e 100644 --- a/system/service.py +++ b/system/service.py @@ -106,7 +106,8 @@ import select import time import string -from distutils.version import LooseVersion +if platform.system() == 'Linux': + from distutils.version import LooseVersion class Service(object): """ From 6cbce4d911b62a6135bcfcbb6da9eb3e497c0fbb Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 13 Jan 2015 22:39:20 -0800 Subject: [PATCH 225/250] Potential fix for 640 --- cloud/amazon/elasticache.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cloud/amazon/elasticache.py b/cloud/amazon/elasticache.py index c1846f525a8..4e76d593cc9 100644 --- a/cloud/amazon/elasticache.py +++ b/cloud/amazon/elasticache.py @@ -357,7 +357,9 @@ class ElastiCacheManager(object): 
'modifying': 'available', 'deleting': 'gone' } - + if self.status == awaited_status: + # No need to wait, we're already done + return if status_map[self.status] != awaited_status: msg = "Invalid awaited status. '%s' cannot transition to '%s'" self.module.fail_json(msg=msg % (self.status, awaited_status)) From 477391bb24322629b9c10a342415a66f0bcef7b3 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 13 Jan 2015 23:18:04 -0800 Subject: [PATCH 226/250] Better error messages if a2enmod/a2dismod are not found --- web_infrastructure/apache2_module.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/web_infrastructure/apache2_module.py b/web_infrastructure/apache2_module.py index 39351482087..bd6de56aed2 100644 --- a/web_infrastructure/apache2_module.py +++ b/web_infrastructure/apache2_module.py @@ -49,6 +49,9 @@ import re def _disable_module(module): name = module.params['name'] a2dismod_binary = module.get_bin_path("a2dismod") + if a2dismod_binary is None: + module.fail_json(msg="a2dismod not found. Perhaps this system does not use a2dismod to manage apache") + result, stdout, stderr = module.run_command("%s %s" % (a2dismod_binary, name)) if re.match(r'.*' + name + r' already disabled.*', stdout, re.S): @@ -61,6 +64,9 @@ def _disable_module(module): def _enable_module(module): name = module.params['name'] a2enmod_binary = module.get_bin_path("a2enmod") + if a2enmod_binary is None: + module.fail_json(msg="a2enmod not found. 
Perhaps this system does not use a2enmod to manage apache") + result, stdout, stderr = module.run_command("%s %s" % (a2enmod_binary, name)) if re.match(r'.*' + name + r' already enabled.*', stdout, re.S): @@ -86,4 +92,5 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() From 6159b5c4235b427cd6553a2d8d99d4fff12bc805 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 14 Jan 2015 13:10:13 -0800 Subject: [PATCH 227/250] Implement user,group,mode,selinux settings for unarchive. This is a partial fix for #234. Still have to figure out how to make change reporting work as we can no longer rely on tar's --compare option --- files/unarchive.py | 91 +++++++++++++++++++++++++++++++--------------- 1 file changed, 62 insertions(+), 29 deletions(-) diff --git a/files/unarchive.py b/files/unarchive.py index f46e52e02a3..c567cfc3d8a 100644 --- a/files/unarchive.py +++ b/files/unarchive.py @@ -76,16 +76,33 @@ EXAMPLES = ''' ''' import os +from zipfile import ZipFile +class UnarchiveError(Exception): + pass # class to handle .zip files class ZipFile(object): - + def __init__(self, src, dest, module): self.src = src self.dest = dest self.module = module self.cmd_path = self.module.get_bin_path('unzip') + self._files_in_archive = [] + + @property + def files_in_archive(self, force_refresh=False): + if self._files_in_archive and not force_refresh: + return self._files_in_archive + + archive = ZipFile(self.src) + try: + self._files_in_archive = archive.namelist() + except: + raise UnarchiveError('Unable to list files in the archive') + + return self._files_in_archive def is_unarchived(self): return dict(unarchived=False) @@ -107,13 +124,29 @@ class ZipFile(object): # class to handle gzipped tar files class TgzFile(object): - + def __init__(self, src, dest, module): self.src = src self.dest = dest self.module = module self.cmd_path = self.module.get_bin_path('tar') self.zipflag = 'z' + 
self._files_in_archive = [] + + @property + def files_in_archive(self, force_refresh=False): + if self._files_in_archive and not force_refresh: + return self._files_in_archive + + cmd = '%s -t%sf "%s"' % (self.cmd_path, self.zipflag, self.src) + rc, out, err = self.module.run_command(cmd) + if rc != 0: + raise UnarchiveError('Unable to list files in the archive') + + for filename in out.splitlines(): + if filename: + self._files_in_archive.append(filename) + return self._files_in_archive def is_unarchived(self): cmd = '%s -v -C "%s" --diff -%sf "%s"' % (self.cmd_path, self.dest, self.zipflag, self.src) @@ -129,41 +162,35 @@ class TgzFile(object): def can_handle_archive(self): if not self.cmd_path: return False - cmd = '%s -t%sf "%s"' % (self.cmd_path, self.zipflag, self.src) - rc, out, err = self.module.run_command(cmd) - if rc == 0: - if len(out.splitlines(True)) > 0: + + try: + if self.files_in_archive: return True + except UnarchiveError: + pass + # Errors and no files in archive assume that we weren't able to + # properly unarchive it return False # class to handle tar files that aren't compressed class TarFile(TgzFile): def __init__(self, src, dest, module): - self.src = src - self.dest = dest - self.module = module - self.cmd_path = self.module.get_bin_path('tar') + super(TarFile, self).__init__(src, dest, module) self.zipflag = '' # class to handle bzip2 compressed tar files class TarBzip(TgzFile): def __init__(self, src, dest, module): - self.src = src - self.dest = dest - self.module = module - self.cmd_path = self.module.get_bin_path('tar') + super(TarFile, self).__init__(src, dest, module) self.zipflag = 'j' # class to handle xz compressed tar files class TarXz(TgzFile): def __init__(self, src, dest, module): - self.src = src - self.dest = dest - self.module = module - self.cmd_path = self.module.get_bin_path('tar') + super(TarFile, self).__init__(src, dest, module) self.zipflag = 'J' @@ -193,6 +220,7 @@ def main(): src = 
os.path.expanduser(module.params['src']) dest = os.path.expanduser(module.params['dest']) copy = module.params['copy'] + file_args = module.load_file_common_arguments(module.params) # did tar file arrive? if not os.path.exists(src): @@ -217,20 +245,25 @@ def main(): res_args['check_results'] = handler.is_unarchived() if res_args['check_results']['unarchived']: res_args['changed'] = False - module.exit_json(**res_args) - - # do the unpack - try: - res_args['extract_results'] = handler.unarchive() - if res_args['extract_results']['rc'] != 0: - module.fail_json(msg="failed to unpack %s to %s" % (src, dest), **res_args) - except IOError: - module.fail_json(msg="failed to unpack %s to %s" % (src, dest)) + else: + # do the unpack + try: + res_args['extract_results'] = handler.unarchive() + if res_args['extract_results']['rc'] != 0: + module.fail_json(msg="failed to unpack %s to %s" % (src, dest), **res_args) + except IOError: + module.fail_json(msg="failed to unpack %s to %s" % (src, dest)) + else: + res_args['changed'] = True - res_args['changed'] = True + # do we need to change perms? 
+ for filename in handler.files_in_archive: + file_args['path'] = os.path.join(dest, filename) + res_args['changed'] = module.set_fs_attributes_if_different(file_args, res_args['changed']) module.exit_json(**res_args) # import module snippets from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() From e2dcb5fc9bd701fd89931ace29e99a198140c300 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 14 Jan 2015 17:22:05 -0500 Subject: [PATCH 228/250] now captures cache fetch failures --- packaging/os/apt.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/packaging/os/apt.py b/packaging/os/apt.py index ad1807f9c20..9f5b8fd4cda 100644 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -619,6 +619,8 @@ def main(): except apt.cache.LockFailedException: module.fail_json(msg="Failed to lock apt for exclusive operation") + except apt.cache.FetchFailedException: + module.fail_json(msg="Could not fetch updated apt files") # import module snippets from ansible.module_utils.basic import * From e3759bd0d396d63b869732a63ecae5ca7a2a9641 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 14 Jan 2015 19:12:35 -0800 Subject: [PATCH 229/250] Add detection of uid,gid,mode changes when deciding whether an archive needs to be unarchived again. 
--- files/unarchive.py | 31 +++++++++++++++++++++++++++---- 1 file changed, 27 insertions(+), 4 deletions(-) diff --git a/files/unarchive.py b/files/unarchive.py index c567cfc3d8a..db9defb37c4 100644 --- a/files/unarchive.py +++ b/files/unarchive.py @@ -104,7 +104,7 @@ class ZipFile(object): return self._files_in_archive - def is_unarchived(self): + def is_unarchived(self, mode, owner, group): return dict(unarchived=False) def unarchive(self): @@ -148,10 +148,32 @@ class TgzFile(object): self._files_in_archive.append(filename) return self._files_in_archive - def is_unarchived(self): - cmd = '%s -v -C "%s" --diff -%sf "%s"' % (self.cmd_path, self.dest, self.zipflag, self.src) + def is_unarchived(self, mode, owner, group): + cmd = '%s -C "%s" --diff -%sf "%s"' % (self.cmd_path, self.dest, self.zipflag, self.src) rc, out, err = self.module.run_command(cmd) unarchived = (rc == 0) + if not unarchived: + # Check whether the differences are in something that we're + # setting anyway + + # What will be set + to_be_set = set() + for perm in (('Mode', mode), ('Gid', group), ('Uid', owner)): + if perm[1] is not None: + to_be_set.add(perm[0]) + + # What is different + changes = set() + difference_re = re.compile(r': (.*) differs$') + for line in out.splitlines(): + match = difference_re.search(line) + if not match: + # Unknown tar output. Assume we have changes + return dict(unarchived=unarchived, rc=rc, out=out, err=err, cmd=cmd) + changes.add(match.groups()[0]) + + if changes and changes.issubset(to_be_set): + unarchived = True return dict(unarchived=unarchived, rc=rc, out=out, err=err, cmd=cmd) def unarchive(self): @@ -242,7 +264,8 @@ def main(): res_args = dict(handler=handler.__class__.__name__, dest=dest, src=src) # do we need to do unpack? 
- res_args['check_results'] = handler.is_unarchived() + res_args['check_results'] = handler.is_unarchived(file_args['mode'], + file_args['owner'], file_args['group']) if res_args['check_results']['unarchived']: res_args['changed'] = False else: From 8f43a0a4fe641b36e31e63418a3263e60b695d8e Mon Sep 17 00:00:00 2001 From: Tim G Date: Fri, 16 Jan 2015 10:06:36 +1000 Subject: [PATCH 230/250] Load distutils on all platforms EXCEPT Solaris. Solaris doesn't ship distutils with with the default Python package. This patch fixes "service" on Solaris since 30d6713. --- system/service.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/system/service.py b/system/service.py index fea5722710e..362359dd553 100644 --- a/system/service.py +++ b/system/service.py @@ -106,7 +106,8 @@ import select import time import string -if platform.system() == 'Linux': +# The distutils module is not shipped with SUNWPython on Solaris. +if platform.system() != 'SunOS': from distutils.version import LooseVersion class Service(object): From b2fd4d18e000c39cbae63e4df09fce2635e9bebe Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Thu, 15 Jan 2015 19:13:32 -0600 Subject: [PATCH 231/250] It is not correct that you need at least 1 primary node --- cloud/rackspace/rax_clb_nodes.py | 24 +----------------------- 1 file changed, 1 insertion(+), 23 deletions(-) diff --git a/cloud/rackspace/rax_clb_nodes.py b/cloud/rackspace/rax_clb_nodes.py index 24325b44597..472fad19b1c 100644 --- a/cloud/rackspace/rax_clb_nodes.py +++ b/cloud/rackspace/rax_clb_nodes.py @@ -150,21 +150,6 @@ def _get_node(lb, node_id=None, address=None, port=None): return None -def _is_primary(node): - """Return True if node is primary and enabled""" - return (node.type.lower() == 'primary' and - node.condition.lower() == 'enabled') - - -def _get_primary_nodes(lb): - """Return a list of primary and enabled nodes""" - nodes = [] - for node in lb.nodes: - if _is_primary(node): - nodes.append(node) - return nodes - - def 
main(): argument_spec = rax_argument_spec() argument_spec.update( @@ -230,13 +215,6 @@ def main(): if state == 'absent': if not node: # Removing a non-existent node module.exit_json(changed=False, state=state) - - # The API detects this as well but currently pyrax does not return a - # meaningful error message - if _is_primary(node) and len(_get_primary_nodes(lb)) == 1: - module.fail_json( - msg='At least one primary node has to be enabled') - try: lb.delete_node(node) result = {} @@ -299,5 +277,5 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.rax import * -### invoke the module +# invoke the module main() From d2829c2510373a52230ab5d034e60f173d2e5e09 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Thu, 15 Jan 2015 19:27:34 -0600 Subject: [PATCH 232/250] Just use built in required functionality for arguments --- cloud/rackspace/rax_clb.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/cloud/rackspace/rax_clb.py b/cloud/rackspace/rax_clb.py index 7a2699709da..38baa77b6ff 100644 --- a/cloud/rackspace/rax_clb.py +++ b/cloud/rackspace/rax_clb.py @@ -140,10 +140,6 @@ except ImportError: def cloud_load_balancer(module, state, name, meta, algorithm, port, protocol, vip_type, timeout, wait, wait_timeout, vip_id): - for arg in (state, name, port, protocol, vip_type): - if not arg: - module.fail_json(msg='%s is required for rax_clb' % arg) - if int(timeout) < 30: module.fail_json(msg='"timeout" must be greater than or equal to 30') @@ -257,7 +253,7 @@ def main(): algorithm=dict(choices=CLB_ALGORITHMS, default='LEAST_CONNECTIONS'), meta=dict(type='dict', default={}), - name=dict(), + name=dict(required=True), port=dict(type='int', default=80), protocol=dict(choices=CLB_PROTOCOLS, default='HTTP'), state=dict(default='present', choices=['present', 'absent']), From 6ca357198e464d079294b600befc04da36aacef6 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Fri, 16 Jan 2015 15:33:41 -0600 Subject: [PATCH 233/250] Clean up 
some required argument logic --- cloud/rackspace/rax_keypair.py | 4 ++-- cloud/rackspace/rax_network.py | 9 ++++----- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/cloud/rackspace/rax_keypair.py b/cloud/rackspace/rax_keypair.py index 591ad8c3597..8f38abc12e0 100644 --- a/cloud/rackspace/rax_keypair.py +++ b/cloud/rackspace/rax_keypair.py @@ -104,7 +104,7 @@ def rax_keypair(module, name, public_key, state): keypair = {} if state == 'present': - if os.path.isfile(public_key): + if public_key and os.path.isfile(public_key): try: f = open(public_key) public_key = f.read() @@ -143,7 +143,7 @@ def main(): argument_spec = rax_argument_spec() argument_spec.update( dict( - name=dict(), + name=dict(required=True), public_key=dict(), state=dict(default='present', choices=['absent', 'present']), ) diff --git a/cloud/rackspace/rax_network.py b/cloud/rackspace/rax_network.py index bc4745a7a84..bd23f5f878d 100644 --- a/cloud/rackspace/rax_network.py +++ b/cloud/rackspace/rax_network.py @@ -65,10 +65,6 @@ except ImportError: def cloud_network(module, state, label, cidr): - for arg in (state, label, cidr): - if not arg: - module.fail_json(msg='%s is required for cloud_networks' % arg) - changed = False network = None networks = [] @@ -79,6 +75,9 @@ def cloud_network(module, state, label, cidr): 'incorrectly capitalized region name.') if state == 'present': + if not cidr: + module.fail_json(msg='missing required arguments: cidr') + try: network = pyrax.cloud_networks.find_network_by_label(label) except pyrax.exceptions.NetworkNotFound: @@ -115,7 +114,7 @@ def main(): dict( state=dict(default='present', choices=['present', 'absent']), - label=dict(), + label=dict(required=True), cidr=dict() ) ) From 0794597c7e3ed584a8d3d238393fece94197f32b Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 19 Jan 2015 09:32:50 -0500 Subject: [PATCH 234/250] corrected release when this feature was added --- cloud/amazon/ec2_vol.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) 
diff --git a/cloud/amazon/ec2_vol.py b/cloud/amazon/ec2_vol.py index 050863c22fb..7fd58fa5348 100644 --- a/cloud/amazon/ec2_vol.py +++ b/cloud/amazon/ec2_vol.py @@ -55,7 +55,7 @@ options: required: false default: standard aliases: [] - version_added: "1.8" + version_added: "1.9" iops: description: - the provisioned IOPs you want to associate with this volume (integer). From d8032ecc8171d24f8034e1f6e2d8976ca4d6c134 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Mon, 19 Jan 2015 10:54:22 -0600 Subject: [PATCH 235/250] Use rax_to_dict and make sure to return the volume details after deletion --- cloud/rackspace/rax_cbs.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/cloud/rackspace/rax_cbs.py b/cloud/rackspace/rax_cbs.py index a1b6ce46a6e..b72d757c71f 100644 --- a/cloud/rackspace/rax_cbs.py +++ b/cloud/rackspace/rax_cbs.py @@ -145,10 +145,7 @@ def cloud_block_storage(module, state, name, description, meta, size, attempts=attempts) volume.get() - for key, value in vars(volume).iteritems(): - if (isinstance(value, NON_CALLABLES) and - not key.startswith('_')): - instance[key] = value + instance = rax_to_dict(volume) result = dict(changed=changed, volume=instance) @@ -164,6 +161,7 @@ def cloud_block_storage(module, state, name, description, meta, size, elif state == 'absent': if volume: + instance = rax_to_dict(volume) try: volume.delete() changed = True From f3d8d0f83f5ae13b8d58d758daffb38db4a4c81a Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Mon, 19 Jan 2015 10:58:22 -0600 Subject: [PATCH 236/250] Remove some broken and unnecessary required args logic --- cloud/rackspace/rax_cbs.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/cloud/rackspace/rax_cbs.py b/cloud/rackspace/rax_cbs.py index b72d757c71f..261168889cc 100644 --- a/cloud/rackspace/rax_cbs.py +++ b/cloud/rackspace/rax_cbs.py @@ -108,10 +108,6 @@ except ImportError: def cloud_block_storage(module, state, name, description, meta, size, snapshot_id, volume_type, wait, 
wait_timeout): - for arg in (state, name, size, volume_type): - if not arg: - module.fail_json(msg='%s is required for rax_cbs' % arg) - if size < 100: module.fail_json(msg='"size" must be greater than or equal to 100') From 32ef72df2eafafb6e8b9c5523813717aace86931 Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Mon, 19 Jan 2015 12:43:30 -0600 Subject: [PATCH 237/250] Small fix-ups to convert objects to dicts, update volume details at the appropriate time, and remove unnecessary required argument logic --- cloud/rackspace/rax_cbs_attachments.py | 17 +++++------------ 1 file changed, 5 insertions(+), 12 deletions(-) diff --git a/cloud/rackspace/rax_cbs_attachments.py b/cloud/rackspace/rax_cbs_attachments.py index 365f93cd6e2..870b8e611df 100644 --- a/cloud/rackspace/rax_cbs_attachments.py +++ b/cloud/rackspace/rax_cbs_attachments.py @@ -90,11 +90,6 @@ except ImportError: def cloud_block_storage_attachments(module, state, volume, server, device, wait, wait_timeout): - for arg in (state, volume, server, device): - if not arg: - module.fail_json(msg='%s is required for rax_cbs_attachments' % - arg) - cbs = pyrax.cloud_blockstorage cs = pyrax.cloudservers @@ -133,7 +128,7 @@ def cloud_block_storage_attachments(module, state, volume, server, device, not key.startswith('_')): instance[key] = value - result = dict(changed=changed, volume=instance) + result = dict(changed=changed) if volume.status == 'error': result['msg'] = '%s failed to build' % volume.id @@ -142,6 +137,9 @@ def cloud_block_storage_attachments(module, state, volume, server, device, pyrax.utils.wait_until(volume, 'status', 'in-use', interval=5, attempts=attempts) + volume.get() + result['volume'] = rax_to_dict(volume) + if 'msg' in result: module.fail_json(**result) else: @@ -167,12 +165,7 @@ def cloud_block_storage_attachments(module, state, volume, server, device, elif volume.attachments: module.fail_json(msg='Volume is attached to another server') - for key, value in vars(volume).iteritems(): - if 
(isinstance(value, NON_CALLABLES) and - not key.startswith('_')): - instance[key] = value - - result = dict(changed=changed, volume=instance) + result = dict(changed=changed, volume=rax_to_dict(volume)) if volume.status == 'error': result['msg'] = '%s failed to build' % volume.id From c526a695de9a2d5f1c87567f2898cc72d595b38c Mon Sep 17 00:00:00 2001 From: Matt Martz Date: Mon, 19 Jan 2015 13:08:56 -0600 Subject: [PATCH 238/250] Remove unnecessary required arg logic, and remove 'absent' as a valid choice since it isn't implemented --- cloud/rackspace/rax_identity.py | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/cloud/rackspace/rax_identity.py b/cloud/rackspace/rax_identity.py index ea40ea2ef46..47b4cb60cf0 100644 --- a/cloud/rackspace/rax_identity.py +++ b/cloud/rackspace/rax_identity.py @@ -55,10 +55,6 @@ except ImportError: def cloud_identity(module, state, identity): - for arg in (state, identity): - if not arg: - module.fail_json(msg='%s is required for rax_identity' % arg) - instance = dict( authenticated=identity.authenticated, credentials=identity._creds_file @@ -79,7 +75,7 @@ def main(): argument_spec = rax_argument_spec() argument_spec.update( dict( - state=dict(default='present', choices=['present', 'absent']) + state=dict(default='present', choices=['present']) ) ) @@ -95,7 +91,7 @@ def main(): setup_rax_module(module, pyrax) - if pyrax.identity is None: + if not pyrax.identity: module.fail_json(msg='Failed to instantiate client. 
This ' 'typically indicates an invalid region or an ' 'incorrectly capitalized region name.') @@ -106,5 +102,5 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.rax import * -### invoke the module +# invoke the module main() From df2088377bc09f899a0e85bb7b6ff278e661c005 Mon Sep 17 00:00:00 2001 From: Alex Clifford Date: Tue, 20 Jan 2015 10:43:22 +1100 Subject: [PATCH 239/250] ttl should always be used during a delete --- cloud/amazon/route53.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cloud/amazon/route53.py b/cloud/amazon/route53.py index 6fb44fcbf0f..8938a728700 100644 --- a/cloud/amazon/route53.py +++ b/cloud/amazon/route53.py @@ -113,6 +113,7 @@ EXAMPLES = ''' command: delete zone: foo.com record: "{{ rec.set.record }}" + ttl: "{{ rec.set.ttl }}" type: "{{ rec.set.type }}" value: "{{ rec.set.value }}" From f85b7ee13c60642ec33ec8790d91e455387e1306 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 19 Jan 2015 19:37:57 -0500 Subject: [PATCH 240/250] now handles non string values for sysctl --- system/sysctl.py | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/system/sysctl.py b/system/sysctl.py index 3cf29f9a32b..4517c724ca9 100644 --- a/system/sysctl.py +++ b/system/sysctl.py @@ -185,12 +185,20 @@ class SysctlModule(object): def _parse_value(self, value): if value is None: return '' - elif value.lower() in BOOLEANS_TRUE: - return '1' - elif value.lower() in BOOLEANS_FALSE: - return '0' + elif isinstance(value, bool): + if value: + return '1' + else: + return '0' + elif isinstance(value, basestring): + if value.lower() in BOOLEANS_TRUE: + return '1' + elif value.lower() in BOOLEANS_FALSE: + return '0' + else: + return value.strip() else: - return value.strip() + return value # ============================================================== # SYSCTL COMMAND MANAGEMENT From 347234b937d9054a35dc67bc7bffd96781da7dac Mon Sep 17 00:00:00 2001 From: Ben Konrath Date: Tue, 20 Jan 2015 21:50:15 
+0100 Subject: [PATCH 241/250] Add support for SL7 to hostname module. --- system/hostname.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/system/hostname.py b/system/hostname.py index 2ca7479829b..f645a8cdfd3 100644 --- a/system/hostname.py +++ b/system/hostname.py @@ -380,12 +380,20 @@ class CentOSLinuxHostname(Hostname): class ScientificHostname(Hostname): platform = 'Linux' distribution = 'Scientific' - strategy_class = RedHatStrategy + distribution_version = get_distribution_version() + if distribution_version and LooseVersion(distribution_version) >= LooseVersion("7"): + strategy_class = SystemdStrategy + else: + strategy_class = RedHatStrategy class ScientificLinuxHostname(Hostname): platform = 'Linux' distribution = 'Scientific linux' - strategy_class = RedHatStrategy + distribution_version = get_distribution_version() + if distribution_version and LooseVersion(distribution_version) >= LooseVersion("7"): + strategy_class = SystemdStrategy + else: + strategy_class = RedHatStrategy class AmazonLinuxHostname(Hostname): platform = 'Linux' From 95d4b796c0a7eb6827d49d756467fcc4b4f21c5e Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 21 Jan 2015 17:27:40 -0500 Subject: [PATCH 242/250] updated tenancy version added --- cloud/amazon/ec2.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index 84fa572adab..29c142514c8 100644 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -62,7 +62,7 @@ options: default: null aliases: [] tenancy: - version_added: "1.8" + version_added: "1.9" description: - An instance with a tenancy of "dedicated" runs on single-tenant hardware and can only be launched into a VPC. Valid values are:"default" or "dedicated". NOTE: To use dedicated tenancy you MUST specify a vpc_subnet_id as well. Dedicated tenancy is not available for EC2 "micro" instances. 
required: false From 0162fc525dc5f8cd67caf663086160efa27707d8 Mon Sep 17 00:00:00 2001 From: Ryan Rawson Date: Tue, 20 Jan 2015 16:32:36 -0800 Subject: [PATCH 243/250] Fixes #581 - digitalocean module cannot create private_networking=true droplets --- cloud/digital_ocean/digital_ocean.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cloud/digital_ocean/digital_ocean.py b/cloud/digital_ocean/digital_ocean.py index efebf5f1bcf..7e0a432c8dc 100644 --- a/cloud/digital_ocean/digital_ocean.py +++ b/cloud/digital_ocean/digital_ocean.py @@ -236,7 +236,8 @@ class Droplet(JsonfyMixIn): @classmethod def add(cls, name, size_id, image_id, region_id, ssh_key_ids=None, virtio=True, private_networking=False, backups_enabled=False): - json = cls.manager.new_droplet(name, size_id, image_id, region_id, ssh_key_ids, virtio, private_networking, backups_enabled) + private_networking_lower = str(private_networking).lower() + json = cls.manager.new_droplet(name, size_id, image_id, region_id, ssh_key_ids, virtio, private_networking_lower, backups_enabled) droplet = cls(json) return droplet From 9f333afb6abe24630ba5b6cd2745d3dcc269d712 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 21 Jan 2015 16:14:11 -0800 Subject: [PATCH 244/250] Standardize class names on Archive suffix. 
This also removes the collision between the stdlib ZipFile class and the module's ZipFile class Fixes #681 --- files/unarchive.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/files/unarchive.py b/files/unarchive.py index db9defb37c4..fc2db0e6907 100644 --- a/files/unarchive.py +++ b/files/unarchive.py @@ -82,7 +82,7 @@ class UnarchiveError(Exception): pass # class to handle .zip files -class ZipFile(object): +class ZipArchive(object): def __init__(self, src, dest, module): self.src = src @@ -123,7 +123,7 @@ class ZipFile(object): # class to handle gzipped tar files -class TgzFile(object): +class TgzArchive(object): def __init__(self, src, dest, module): self.src = src @@ -196,29 +196,29 @@ class TgzFile(object): # class to handle tar files that aren't compressed -class TarFile(TgzFile): +class TarArchive(TgzArchive): def __init__(self, src, dest, module): - super(TarFile, self).__init__(src, dest, module) + super(TarArchive, self).__init__(src, dest, module) self.zipflag = '' # class to handle bzip2 compressed tar files -class TarBzip(TgzFile): +class TarBzipArchive(TgzArchive): def __init__(self, src, dest, module): - super(TarFile, self).__init__(src, dest, module) + super(TarBzipArchive, self).__init__(src, dest, module) self.zipflag = 'j' # class to handle xz compressed tar files -class TarXz(TgzFile): +class TarXzArchive(TgzArchive): def __init__(self, src, dest, module): - super(TarFile, self).__init__(src, dest, module) + super(TarXzArchive, self).__init__(src, dest, module) self.zipflag = 'J' # try handlers in order and return the one that works or bail if none work def pick_handler(src, dest, module): - handlers = [TgzFile, ZipFile, TarFile, TarBzip, TarXz] + handlers = [TgzArchive, ZipArchive, TarArchive, TarBzipArchive, TarXzArchive] for handler in handlers: obj = handler(src, dest, module) if obj.can_handle_archive(): From d66c3fcf5106fb57f606cea92afc71318c7daff0 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: 
Wed, 21 Jan 2015 19:50:34 -0500 Subject: [PATCH 245/250] moved defaulting to module constant to after when it is defined --- cloud/docker/docker.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index f71bad42e79..b9c379eed4a 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -524,6 +524,8 @@ class DockerManager(object): # connect to docker server docker_url = urlparse(module.params.get('docker_url')) docker_api_version = module.params.get('docker_api_version') + if not docker_api_version: + docker_api_version=docker.client.DEFAULT_DOCKER_API_VERSION self.client = docker.Client(base_url=docker_url.geturl(), version=docker_api_version) self.docker_py_versioninfo = get_docker_py_versioninfo() @@ -845,7 +847,7 @@ def main(): memory_limit = dict(default=0), memory_swap = dict(default=0), docker_url = dict(default='unix://var/run/docker.sock'), - docker_api_version = dict(default=docker.client.DEFAULT_DOCKER_API_VERSION), + docker_api_version = dict(), username = dict(default=None), password = dict(), email = dict(), From 9e1847ed033a8b283940fbb16660ffe06b14316d Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 21 Jan 2015 20:09:46 -0500 Subject: [PATCH 246/250] ec2 fix docs format --- cloud/amazon/ec2.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index 29c142514c8..93b496cb5e8 100644 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -64,7 +64,7 @@ options: tenancy: version_added: "1.9" description: - - An instance with a tenancy of "dedicated" runs on single-tenant hardware and can only be launched into a VPC. Valid values are:"default" or "dedicated". NOTE: To use dedicated tenancy you MUST specify a vpc_subnet_id as well. Dedicated tenancy is not available for EC2 "micro" instances. + - An instance with a tenancy of "dedicated" runs on single-tenant hardware and can only be launched into a VPC. 
Valid values are "default" or "dedicated". Note that to use dedicated tenancy you MUST specify a vpc_subnet_id as well. Dedicated tenancy is not available for EC2 "micro" instances. required: false default: default aliases: [] From c3a0e8a7a4ae3efde08078df1c4bde3cd8abde77 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 22 Jan 2015 18:05:54 -0800 Subject: [PATCH 247/250] Don't fail if virtualenv is not installed and we do not need to initialize the virtualenv Fixes #688 --- packaging/language/pip.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/packaging/language/pip.py b/packaging/language/pip.py index 17f52c00398..97576a5258b 100644 --- a/packaging/language/pip.py +++ b/packaging/language/pip.py @@ -98,7 +98,7 @@ options: required: false default: null notes: - Please note that virtualenv (U(http://www.virtualenv.org/)) must be installed on the remote host if the virtualenv parameter is specified. + - Please note that virtualenv (U(http://www.virtualenv.org/)) must be installed on the remote host if the virtualenv parameter is specified and the virtualenv needs to be initialized. 
requirements: [ "virtualenv", "pip" ] author: Matt Wright ''' @@ -252,12 +252,14 @@ def main(): if env: env = os.path.expanduser(env) - virtualenv = os.path.expanduser(virtualenv_command) - if os.path.basename(virtualenv) == virtualenv: - virtualenv = module.get_bin_path(virtualenv_command, True) if not os.path.exists(os.path.join(env, 'bin', 'activate')): if module.check_mode: module.exit_json(changed=True) + + virtualenv = os.path.expanduser(virtualenv_command) + if os.path.basename(virtualenv) == virtualenv: + virtualenv = module.get_bin_path(virtualenv_command, True) + if module.params['virtualenv_site_packages']: cmd = '%s --system-site-packages %s' % (virtualenv, env) else: @@ -278,7 +280,7 @@ def main(): pip = _get_pip(module, env, module.params['executable']) cmd = '%s %s' % (pip, state_map[state]) - + # If there's a virtualenv we want things we install to be able to use other # installations that exist as binaries within this virtualenv. Example: we # install cython and then gevent -- gevent needs to use the cython binary, @@ -308,7 +310,7 @@ def main(): cmd += ' %s' % _get_full_name(name, version) elif requirements: cmd += ' -r %s' % requirements - + this_dir = tempfile.gettempdir() if chdir: this_dir = os.path.join(this_dir, chdir) @@ -319,7 +321,7 @@ def main(): elif name.startswith('svn+') or name.startswith('git+') or \ name.startswith('hg+') or name.startswith('bzr+'): module.exit_json(changed=True) - + freeze_cmd = '%s freeze' % pip rc, out_pip, err_pip = module.run_command(freeze_cmd, cwd=this_dir) From 4f1f8e89c9c380c416badefb48154e40ade91afc Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 22 Jan 2015 21:55:27 -0800 Subject: [PATCH 248/250] More information about distutils on Solaris in case we run into this again --- system/service.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/system/service.py b/system/service.py index 108427bb954..321950cb73b 100644 --- a/system/service.py +++ b/system/service.py @@ -107,6 +107,9 @@ import 
time import string # The distutils module is not shipped with SUNWPython on Solaris. +# It's in the SUNWPython-devel package which also contains development files +# that don't belong on production boxes. Since our Solaris code doesn't +# depend on LooseVersion, do not import it on Solaris. if platform.system() != 'SunOS': from distutils.version import LooseVersion From d2c6791082b93890ce8ff83b5a2e811139228a99 Mon Sep 17 00:00:00 2001 From: calmera Date: Tue, 20 Jan 2015 08:27:52 +0100 Subject: [PATCH 249/250] Update docker.py Added some more documentation for the memory_limit and volume options. --- cloud/docker/docker.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index b9c379eed4a..00e5b40f80f 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -77,7 +77,7 @@ options: version_added: "1.5" volumes: description: - - Set volume(s) to mount on the container + - Set volume(s) to mount on the container separated with a comma (,) and in the format "source:dest[:rights]" required: false default: null aliases: [] @@ -96,11 +96,11 @@ options: version_added: "1.5" memory_limit: description: - - Set RAM allocated to container + - Set RAM allocated to container. It whould be passed as an amount of bytes. 
For example 1048576 = 1Gb required: false default: null aliases: [] - default: 256MB + default: 262144 docker_url: description: - URL of docker host to issue commands to From 670098af2d5b3351382f82848fcc8fdb5744c8f8 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 22 Jan 2015 23:05:35 -0800 Subject: [PATCH 250/250] Spelling and grammar fix --- cloud/docker/docker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 00e5b40f80f..1957c2d4db0 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -96,7 +96,7 @@ options: version_added: "1.5" memory_limit: description: - - Set RAM allocated to container. It whould be passed as an amount of bytes. For example 1048576 = 1Gb + - Set RAM allocated to container. It will be passed as a number of bytes. For example 1048576 = 1Gb required: false default: null aliases: []