From fe5d90f27f5a8af84304324a89e21552d82abfdb Mon Sep 17 00:00:00 2001 From: Rowan Wookey Date: Mon, 16 Mar 2015 17:53:35 +0000 Subject: [PATCH 001/200] Fixes #542 error when ec2_asg arguments aren't specified If max_size/min_size/desired_capacity are omitted when updating an autoscaling group use the existing values --- cloud/amazon/ec2_asg.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/cloud/amazon/ec2_asg.py b/cloud/amazon/ec2_asg.py index 6e5d3508cb8..9c98b282aef 100644 --- a/cloud/amazon/ec2_asg.py +++ b/cloud/amazon/ec2_asg.py @@ -47,15 +47,15 @@ options: required: false min_size: description: - - Minimum number of instances in group + - Minimum number of instances in group, if unspecified then the current group value will be used. required: false max_size: description: - - Maximum number of instances in group + - Maximum number of instances in group, if unspecified then the current group value will be used. required: false desired_capacity: description: - - Desired number of instances in group + - Desired number of instances in group, if unspecified then the current group value will be used. 
required: false replace_all_instances: description: @@ -449,6 +449,13 @@ def replace(connection, module): changed = False return(changed, props) + #check if min_size/max_size/desired capacity have been specified and if not use ASG values + if min_size is None: + min_size = as_group.min_size + if max_size is None: + max_size = as_group.max_size + if desired_capacity is None: + desired_capacity = as_group.desired_capacity # set temporary settings and wait for them to be reached as_group.max_size = max_size + batch_size as_group.min_size = min_size + batch_size From 8cb4b7b01c7a45512105a9e0a30654219facf725 Mon Sep 17 00:00:00 2001 From: Matt Ferrante Date: Wed, 17 Dec 2014 11:30:05 -0500 Subject: [PATCH 002/200] ec2_ami can update an ami's launch_permissions --- cloud/amazon/ec2_ami.py | 69 ++++++++++++++++++++++++++++++++++++++++- 1 file changed, 68 insertions(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_ami.py b/cloud/amazon/ec2_ami.py index 0d504ee3b0c..a8fad2d1b3d 100644 --- a/cloud/amazon/ec2_ami.py +++ b/cloud/amazon/ec2_ami.py @@ -86,6 +86,12 @@ options: required: false default: null version_added: "2.0" + launch_permissions: + description: + - Users and groups that should be able to launch the ami. Expects dictionary with a key of user_ids and/or group_names. user_ids should be a list of account ids. group_name should be a list of groups, "all" is the only acceptable value currently. 
+ required: false + default: null + aliases: [] author: "Evan Duffield (@scicoin-project) " extends_documentation_fragment: aws @@ -151,6 +157,25 @@ EXAMPLES = ''' delete_snapshot: False state: absent +# Update AMI Launch Permissions, making it public +- ec2_ami: + aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx + aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + region: xxxxxx + image_id: "{{ instance.image_id }}" + state: present + launch_permissions: + group_names: ['all'] + +# Allow AMI to be launched by another account +- ec2_ami: + aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx + aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + region: xxxxxx + image_id: "{{ instance.image_id }}" + state: present + launch_permissions: + user_ids: ['123456789012'] ''' import sys @@ -181,6 +206,7 @@ def create_image(module, ec2): no_reboot = module.params.get('no_reboot') device_mapping = module.params.get('device_mapping') tags = module.params.get('tags') + launch_permissions = module.params.get('launch_permissions') try: params = {'instance_id': instance_id, @@ -241,6 +267,12 @@ def create_image(module, ec2): ec2.create_tags(image_id, tags) except boto.exception.EC2ResponseError, e: module.fail_json(msg = "Image tagging failed => %s: %s" % (e.error_code, e.error_message)) + if launch_permissions: + try: + img = ec2.get_image(image_id) + img.set_launch_permissions(**launch_permissions) + except boto.exception.BotoServerError, e: + module.fail_json(msg="%s: %s" % (e.error_code, e.error_message), image_id=image_id) module.exit_json(msg="AMI creation operation complete", image_id=image_id, state=img.state, changed=True) @@ -281,6 +313,36 @@ def deregister_image(module, ec2): sys.exit(0) +def update_image(module, ec2): + """ + Updates AMI + """ + + image_id = module.params.get('image_id') + launch_permissions = module.params.get('launch_permissions') + if 'user_ids' in launch_permissions: + launch_permissions['user_ids'] = [str(user_id) for user_id in 
launch_permissions['user_ids']] + + img = ec2.get_image(image_id) + if img == None: + module.fail_json(msg = "Image %s does not exist" % image_id, changed=False) + + try: + set_permissions = img.get_launch_permissions() + if set_permissions != launch_permissions: + if ('user_ids' in launch_permissions and launch_permissions['user_ids']) or ('group_names' in launch_permissions and launch_permissions['group_names']): + res = img.set_launch_permissions(**launch_permissions) + elif ('user_ids' in set_permissions and set_permissions['user_ids']) or ('group_names' in set_permissions and set_permissions['group_names']): + res = img.remove_launch_permissions(**set_permissions) + else: + module.exit_json(msg="AMI not updated", launch_permissions=set_permissions, changed=False) + module.exit_json(msg="AMI launch permissions updated", launch_permissions=launch_permissions, set_perms=set_permissions, changed=True) + else: + module.exit_json(msg="AMI not updated", launch_permissions=set_permissions, changed=False) + + except boto.exception.BotoServerError, e: + module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) + def main(): argument_spec = ec2_argument_spec() argument_spec.update(dict( @@ -294,7 +356,8 @@ def main(): no_reboot = dict(default=False, type="bool"), state = dict(default='present'), device_mapping = dict(type='list'), - tags = dict(type='dict') + tags = dict(type='dict'), + launch_permissions = dict(type='dict') ) ) module = AnsibleModule(argument_spec=argument_spec) @@ -314,6 +377,10 @@ def main(): deregister_image(module, ec2) elif module.params.get('state') == 'present': + if module.params.get('image_id') and module.params.get('launch_permissions'): + # Update image's launch permissions + update_image(module, ec2) + # Changed is always set to true when provisioning new AMI if not module.params.get('instance_id'): module.fail_json(msg='instance_id parameter is required for new image') From f4228d81d2103af443d05076acbf70f0ba6c7dac Mon Sep 17 
00:00:00 2001 From: HAMSIK Adam Date: Thu, 6 Aug 2015 16:51:36 +0200 Subject: [PATCH 003/200] Convert enabled value to boolean to actually work, make sure we can set expiration period to 0(None) to disable it --- cloud/amazon/ec2_elb_lb.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/cloud/amazon/ec2_elb_lb.py b/cloud/amazon/ec2_elb_lb.py index 3d54f994436..856b6b3787a 100644 --- a/cloud/amazon/ec2_elb_lb.py +++ b/cloud/amazon/ec2_elb_lb.py @@ -755,21 +755,25 @@ class ElbManager(object): if self.stickiness['type'] == 'loadbalancer': policy = [] policy_type = 'LBCookieStickinessPolicyType' - if self.stickiness['enabled'] == True: + + if self.module.boolean(self.stickiness['enabled']) == True: if 'expiration' not in self.stickiness: self.module.fail_json(msg='expiration must be set when type is loadbalancer') + expiration = self.stickiness['expiration'] if self.stickiness['expiration'] is not 0 else None + policy_attrs = { 'type': policy_type, 'attr': 'lb_cookie_stickiness_policies', 'method': 'create_lb_cookie_stickiness_policy', 'dict_key': 'cookie_expiration_period', - 'param_value': self.stickiness['expiration'] + 'param_value': expiration } policy.append(self._policy_name(policy_attrs['type'])) + self._set_stickiness_policy(elb_info, listeners_dict, policy, **policy_attrs) - elif self.stickiness['enabled'] == False: + elif self.module.boolean(self.stickiness['enabled']) == False: if len(elb_info.policies.lb_cookie_stickiness_policies): if elb_info.policies.lb_cookie_stickiness_policies[0].policy_name == self._policy_name(policy_type): self.changed = True @@ -781,7 +785,7 @@ class ElbManager(object): elif self.stickiness['type'] == 'application': policy = [] policy_type = 'AppCookieStickinessPolicyType' - if self.stickiness['enabled'] == True: + if self.module.boolean(self.stickiness['enabled']) == True: if 'cookie' not in self.stickiness: self.module.fail_json(msg='cookie must be set when type is application') @@ -795,7 +799,7 
@@ class ElbManager(object): } policy.append(self._policy_name(policy_attrs['type'])) self._set_stickiness_policy(elb_info, listeners_dict, policy, **policy_attrs) - elif self.stickiness['enabled'] == False: + elif self.module.boolean(self.stickiness['enabled']) == False: if len(elb_info.policies.app_cookie_stickiness_policies): if elb_info.policies.app_cookie_stickiness_policies[0].policy_name == self._policy_name(policy_type): self.changed = True From cd8bfc7695b8bb2a6622afff04fd5d23752df815 Mon Sep 17 00:00:00 2001 From: sysadmin75 Date: Thu, 6 Aug 2015 16:37:48 -0400 Subject: [PATCH 004/200] Adds tmp_dest option to get_url module. Addresses the issue in ansible/ansible#9512 --- network/basics/get_url.py | 30 ++++++++++++++++++++++++++---- 1 file changed, 26 insertions(+), 4 deletions(-) diff --git a/network/basics/get_url.py b/network/basics/get_url.py index 5e39887df7f..d0cc02408fe 100644 --- a/network/basics/get_url.py +++ b/network/basics/get_url.py @@ -55,6 +55,14 @@ options: If C(dest) is a directory, the file will always be downloaded (regardless of the force option), but replaced only if the contents changed. required: true + tmp_dest: + description: + - absolute path of where temporary file is downloaded to. + - Defaults to TMPDIR, TEMP or TMP env variables or a platform specific value + - https://docs.python.org/2/library/tempfile.html#tempfile.tempdir + required: false + default: '' + version_added: '2.0' force: description: - If C(yes) and C(dest) is not a directory, will download the file every @@ -163,7 +171,7 @@ def url_filename(url): return 'index.html' return fn -def url_get(module, url, dest, use_proxy, last_mod_time, force, timeout=10, headers=None): +def url_get(module, url, dest, use_proxy, last_mod_time, force, timeout=10, headers=None, tmp_dest=''): """ Download data from the url and store in a temporary file. 
@@ -179,7 +187,19 @@ def url_get(module, url, dest, use_proxy, last_mod_time, force, timeout=10, head if info['status'] != 200: module.fail_json(msg="Request failed", status_code=info['status'], response=info['msg'], url=url, dest=dest) - fd, tempname = tempfile.mkstemp() + if tmp_dest != '': + # tmp_dest should be an existing dir + tmp_dest_is_dir = os.path.isdir(tmp_dest) + if not tmp_dest_is_dir: + if os.path.exists(tmp_dest): + module.fail_json(msg="%s is a file but should be a directory." % tmp_dest) + else: + module.fail_json(msg="%s directoy does not exist." % tmp_dest) + + fd, tempname = tempfile.mkstemp(dir=tmp_dest) + else: + fd, tempname = tempfile.mkstemp() + f = os.fdopen(fd, 'wb') try: shutil.copyfileobj(rsp, f) @@ -221,6 +241,7 @@ def main(): sha256sum = dict(default=''), timeout = dict(required=False, type='int', default=10), headers = dict(required=False, default=None), + tmp_dest = dict(required=False, default=''), ) module = AnsibleModule( @@ -235,7 +256,8 @@ def main(): sha256sum = module.params['sha256sum'] use_proxy = module.params['use_proxy'] timeout = module.params['timeout'] - + tmp_dest = os.path.expanduser(module.params['tmp_dest']) + # Parse headers to dict if module.params['headers']: try: @@ -279,7 +301,7 @@ def main(): last_mod_time = datetime.datetime.utcfromtimestamp(mtime) # download to tmpsrc - tmpsrc, info = url_get(module, url, dest, use_proxy, last_mod_time, force, timeout, headers) + tmpsrc, info = url_get(module, url, dest, use_proxy, last_mod_time, force, timeout, headers, tmp_dest) # Now the request has completed, we can finally generate the final # destination file name from the info dict. From c93456adc3d330a192591ef4f10d83163c10c32d Mon Sep 17 00:00:00 2001 From: Bill Nottingham Date: Tue, 18 Aug 2015 10:14:59 -0400 Subject: [PATCH 005/200] win_msi: document extra_args The extra_args parameter was not documented. It's needed for installing some MSIs. 
--- windows/win_msi.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/windows/win_msi.py b/windows/win_msi.py index 01f09709f57..bd504879a83 100644 --- a/windows/win_msi.py +++ b/windows/win_msi.py @@ -34,6 +34,10 @@ options: description: - File system path to the MSI file to install required: true + extra_args: + description: + - Additional arguments to pass to the msiexec.exe command + required: false state: description: - Whether the MSI file should be installed or uninstalled From 8fa1e9515b125239617aa883b70ce6aaacefc8cc Mon Sep 17 00:00:00 2001 From: tobbe Date: Wed, 19 Aug 2015 22:42:49 +0200 Subject: [PATCH 006/200] Add suport for selinux user when adding a new user on selinux enabled systems --- system/user.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/system/user.py b/system/user.py index 7e3e4c01cd3..1a9c1f07926 100644 --- a/system/user.py +++ b/system/user.py @@ -49,6 +49,10 @@ options: - Optionally when used with the -u option, this option allows to change the user ID to a non-unique value. version_added: "1.1" + seuser: + required: false + description: + - Optionally sets the seuser type (user_u). 
group: required: false description: @@ -254,6 +258,7 @@ class User(object): self.name = module.params['name'] self.uid = module.params['uid'] self.non_unique = module.params['non_unique'] + self.seuser = module.params['seuser'] self.group = module.params['group'] self.groups = module.params['groups'] self.comment = module.params['comment'] @@ -321,6 +326,9 @@ class User(object): if self.non_unique: cmd.append('-o') + if self.seuser is not None: + cmd.append('-Z') + cmd.append(self.seuser) if self.group is not None: if not self.group_exists(self.group): self.module.fail_json(msg="Group %s does not exist" % self.group) @@ -2050,6 +2058,8 @@ def main(): shell=dict(default=None, type='str'), password=dict(default=None, type='str'), login_class=dict(default=None, type='str'), + # following options are specific to selinux + seuser=dict(default=None, type='str'), # following options are specific to userdel force=dict(default='no', type='bool'), remove=dict(default='no', type='bool'), From b04efa22c4403ca869e94e7918721306d23afa8d Mon Sep 17 00:00:00 2001 From: Sarah Haskins Date: Fri, 21 Aug 2015 11:33:28 -0400 Subject: [PATCH 007/200] Expose cache_parameter_group_name in elasticache module The cache_parameter_group_name was not previously exposed in elasticachy.py, I have exposed it, as optional. --- cloud/amazon/elasticache.py | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/cloud/amazon/elasticache.py b/cloud/amazon/elasticache.py index 31ed4696628..32756bb8c22 100644 --- a/cloud/amazon/elasticache.py +++ b/cloud/amazon/elasticache.py @@ -43,6 +43,11 @@ options: - The version number of the cache engine required: false default: none + cache_parameter_group_name: + description: + - The name of the cache parameter group to associate with this cache cluster. If this argument is omitted, the default cache parameter group for the specified engine will be used. 
+ required: false + default: none node_type: description: - The compute and memory capacity of the nodes in the cache cluster @@ -150,11 +155,12 @@ class ElastiCacheManager(object): def __init__(self, module, name, engine, cache_engine_version, node_type, num_nodes, cache_port, cache_subnet_group, cache_security_groups, security_group_ids, zone, wait, - hard_modify, region, **aws_connect_kwargs): + hard_modify, region, cache_parameter_group_name=None, **aws_connect_kwargs): self.module = module self.name = name self.engine = engine self.cache_engine_version = cache_engine_version + self.cache_parameter_group_name = cache_parameter_group_name self.node_type = node_type self.num_nodes = num_nodes self.cache_port = cache_port @@ -214,6 +220,7 @@ class ElastiCacheManager(object): cache_node_type=self.node_type, engine=self.engine, engine_version=self.cache_engine_version, + cache_parameter_group_name=self.cache_parameter_group_name, cache_security_group_names=self.cache_security_groups, security_group_ids=self.security_group_ids, cache_subnet_group_name=self.cache_subnet_group, @@ -293,7 +300,8 @@ class ElastiCacheManager(object): cache_security_group_names=self.cache_security_groups, security_group_ids=self.security_group_ids, apply_immediately=True, - engine_version=self.cache_engine_version) + engine_version=self.cache_engine_version, + cache_parameter_group_name=self.cache_parameter_group_name) except boto.exception.BotoServerError, e: self.module.fail_json(msg=e.message) @@ -478,6 +486,7 @@ def main(): name={'required': True}, engine={'required': False, 'default': 'memcached'}, cache_engine_version={'required': False}, + cache_parameter_group_name={'required': False}, node_type={'required': False, 'default': 'cache.m1.small'}, num_nodes={'required': False, 'default': None, 'type': 'int'}, cache_port={'required': False, 'type': 'int'}, @@ -505,6 +514,7 @@ def main(): state = module.params['state'] engine = module.params['engine'] cache_engine_version = 
module.params['cache_engine_version'] + cache_parameter_group_name = module.params['cache_parameter_group_name'] node_type = module.params['node_type'] num_nodes = module.params['num_nodes'] cache_port = module.params['cache_port'] @@ -530,12 +540,15 @@ def main(): module.fail_json(msg=str("Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set.")) elasticache_manager = ElastiCacheManager(module, name, engine, - cache_engine_version, node_type, + cache_engine_version, + node_type, num_nodes, cache_port, cache_subnet_group, cache_security_groups, security_group_ids, zone, wait, - hard_modify, region, **aws_connect_kwargs) + hard_modify, region, + cache_parameter_group_name=cache_parameter_group_name, + **aws_connect_kwargs) if state == 'present': elasticache_manager.ensure_present() From 8f03f1e4e142bf40ff84e7e163651bd83cad3885 Mon Sep 17 00:00:00 2001 From: Yann Hamon Date: Wed, 26 Aug 2015 00:01:35 +0200 Subject: [PATCH 008/200] Docker module: restarted should update the container when necessary --- cloud/docker/docker.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 82c39006678..b542313079f 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -1586,6 +1586,10 @@ def restarted(manager, containers, count, name): containers.refresh() + for container in manager.get_differing_containers(): + manager.stop_containers([container]) + manager.remove_containers([container]) + manager.restart_containers(containers.running) started(manager, containers, count, name) From 30576ad0c7e411aaa2b78995c669f375d428b859 Mon Sep 17 00:00:00 2001 From: tobbe Date: Sat, 5 Sep 2015 14:39:52 +0200 Subject: [PATCH 009/200] add text to the description, more user friendly --- system/user.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/user.py b/system/user.py index 1a9c1f07926..8303bed0d10 100644 --- a/system/user.py +++ 
b/system/user.py @@ -52,7 +52,7 @@ options: seuser: required: false description: - - Optionally sets the seuser type (user_u). + - Optionally sets the seuser type (user_u) on selinux enabled systems. group: required: false description: From 8052d49b07e12fdaa3af5acea2d50eb93c1b5a83 Mon Sep 17 00:00:00 2001 From: Philippe Jandot Date: Wed, 16 Sep 2015 16:49:09 +0200 Subject: [PATCH 010/200] fix regression introduced by f38186ce8b49ea98e29241712da45917a3154e73, and propose a fix for docker facts --- cloud/docker/docker.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 0ab564208ba..a052fa388d0 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -1525,7 +1525,8 @@ def present(manager, containers, count, name): delta = count - len(containers.deployed) if delta > 0: - containers.notice_changed(manager.create_containers(delta)) + created = manager.create_containers(delta) + containers.notice_changed(manager.get_inspect_containers(created)) if delta < 0: # If both running and stopped containers exist, remove @@ -1540,8 +1541,8 @@ def present(manager, containers, count, name): to_remove.append(c) manager.stop_containers(to_stop) + containers.notice_changed(manager.get_inspect_containers(to_remove)) manager.remove_containers(to_remove) - containers.notice_changed(to_remove) def started(manager, containers, count, name): '''Ensure that exactly `count` matching containers exist and are running.''' @@ -1557,13 +1558,13 @@ def started(manager, containers, count, name): created = manager.create_containers(delta) manager.start_containers(created) - containers.notice_changed(created) + containers.notice_changed(manager.get_inspect_containers(created)) if delta < 0: excess = containers.running[0:-delta] + containers.notice_changed(manager.get_inspect_containers(excess)) manager.stop_containers(excess) manager.remove_containers(excess) - containers.notice_changed(excess) def 
reloaded(manager, containers, count, name): ''' @@ -1597,7 +1598,7 @@ def stopped(manager, containers, count, name): containers.refresh() manager.stop_containers(containers.running) - containers.notice_changed(containers.running) + containers.notice_changed(manager.get_inspect_containers(containers.running)) def killed(manager, containers, count, name): '''Kill any matching containers that are running.''' @@ -1605,7 +1606,7 @@ def killed(manager, containers, count, name): containers.refresh() manager.kill_containers(containers.running) - containers.notice_changed(containers.running) + containers.notice_changed(manager.get_inspect_containers(containers.running)) def absent(manager, containers, count, name): '''Stop and remove any matching containers.''' @@ -1613,8 +1614,8 @@ def absent(manager, containers, count, name): containers.refresh() manager.stop_containers(containers.running) + containers.notice_changed(manager.get_inspect_containers(containers.deployed)) manager.remove_containers(containers.deployed) - containers.notice_changed(containers.deployed) def main(): module = AnsibleModule( @@ -1727,9 +1728,8 @@ def main(): module.exit_json(changed=manager.has_changed(), msg=manager.get_summary_message(), summary=manager.counters, - containers=containers.changed, reload_reasons=manager.get_reload_reason_message(), - ansible_facts=_ansible_facts(manager.get_inspect_containers(containers.changed))) + ansible_facts=_ansible_facts(containers.changed)) except DockerAPIError as e: module.fail_json(changed=manager.has_changed(), msg="Docker API Error: %s" % e.explanation) From 08b09fcc7055307feb58b3ade88abf93babc94ff Mon Sep 17 00:00:00 2001 From: Jumpei Ogawa Date: Thu, 17 Sep 2015 15:31:14 +0900 Subject: [PATCH 011/200] Add better error message when specified network doesn't exist and ipv4_range is not specified --- cloud/google/gce_net.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/google/gce_net.py b/cloud/google/gce_net.py index 
3ae1635ded7..269c05715dc 100644 --- a/cloud/google/gce_net.py +++ b/cloud/google/gce_net.py @@ -212,7 +212,7 @@ def main(): # user wants to create a new network that doesn't yet exist if name and not network: if not ipv4_range: - module.fail_json(msg="Missing required 'ipv4_range' parameter", + module.fail_json(msg="Network '" + name + "' is not found. To create network, 'ipv4_range' parameter is required", changed=False) try: From 7b4b61faffbff877167d33a11a0f6627912fc21e Mon Sep 17 00:00:00 2001 From: Jumpei Ogawa Date: Thu, 17 Sep 2015 15:37:07 +0900 Subject: [PATCH 012/200] Add explanation in case that ipv4_range is required --- cloud/google/gce_net.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cloud/google/gce_net.py b/cloud/google/gce_net.py index 269c05715dc..5c412affb78 100644 --- a/cloud/google/gce_net.py +++ b/cloud/google/gce_net.py @@ -40,6 +40,7 @@ options: ipv4_range: description: - the IPv4 address range in CIDR notation for the network + this parameter is not mandatory when you specified existing network in name parameter, but when you create new network, this parameter is mandatory required: false aliases: ['cidr'] fwname: From 2080c8ab6e801ab62545624d2a47e92a802b48a0 Mon Sep 17 00:00:00 2001 From: Leonty Date: Fri, 25 Sep 2015 23:58:20 +0300 Subject: [PATCH 013/200] Support 'labels' parameter for docker. --- cloud/docker/docker.py | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 0ab564208ba..c22013bb933 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -320,6 +320,13 @@ options: default: false aliases: [] version_added: "2.0" + labels: + description: + - Set container labels. Requires docker >= 1.6 and docker-py >= 1.2.0. 
+ requered: false + default: null + version_added: "1.9.4" + author: - "Cove Schneider (@cove)" - "Joshua Conner (@joshuaconner)" @@ -597,6 +604,7 @@ class DockerManager(object): 'cap_add': ((0, 5, 0), '1.14'), 'cap_drop': ((0, 5, 0), '1.14'), 'read_only': ((1, 0, 0), '1.17'), + 'labels': ((1, 2, 0), '1.18'), # Clientside only 'insecure_registry': ((0, 5, 0), '0.0') } @@ -1123,6 +1131,22 @@ class DockerManager(object): differing.append(container) continue + # LABELS + + expected_labels = {} + for name, value in self.module.params.get('labels').iteritems(): + expected_labels[name] = str(value) + + actual_labels = {} + for container_label in container['Config']['Labels'] or []: + name, value = container_label.split('=', 1) + actual_labels[name] = value + + if actual_labels != expected_labels: + self.reload_reasons.append('labels {0} => {1}'.format(actual_labels, expected_labels)) + differing.append(container) + continue + # HOSTNAME expected_hostname = self.module.params.get('hostname') @@ -1414,6 +1438,7 @@ class DockerManager(object): 'ports': self.exposed_ports, 'volumes': self.volumes, 'environment': self.env, + 'labels': self.module.params.get('labels'), 'hostname': self.module.params.get('hostname'), 'domainname': self.module.params.get('domainname'), 'detach': self.module.params.get('detach'), @@ -1668,6 +1693,7 @@ def main(): cap_add = dict(default=None, type='list'), cap_drop = dict(default=None, type='list'), read_only = dict(default=None, type='bool'), + labels = dict(default={}, type='dict'), ), required_together = ( ['tls_client_cert', 'tls_client_key'], From 83074ad501d84ebd224f782e141847562529a346 Mon Sep 17 00:00:00 2001 From: Mike Christofilopoulos Date: Thu, 1 Oct 2015 17:29:21 +0100 Subject: [PATCH 014/200] add new disks automatically when the 'vm_disk' section changes --- cloud/vmware/vsphere_guest.py | 72 ++++++++++++++++++++++++++++++++++- 1 file changed, 70 insertions(+), 2 deletions(-) diff --git a/cloud/vmware/vsphere_guest.py 
b/cloud/vmware/vsphere_guest.py index b8adb7930c3..85e184cc318 100644 --- a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python2 # -*- coding: utf-8 -*- # This file is part of Ansible @@ -702,11 +702,75 @@ def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, mo ) +def update_disks(vsphere_client, vm, module, vm_disk, changes): + request = VI.ReconfigVM_TaskRequestMsg() + changed = False + + for cnf_disk in vm_disk: + disk_id = re.sub("disk", "", cnf_disk) + found = False + for dev_key in vm._devices: + if vm._devices[dev_key]['type'] == 'VirtualDisk': + hdd_id = vm._devices[dev_key]['label'].split()[2] + if disk_id == hdd_id: + found = True + continue + if not found: + it = VI.ReconfigVM_TaskRequestMsg() + _this = request.new__this(vm._mor) + _this.set_attribute_type(vm._mor.get_attribute_type()) + request.set_element__this(_this) + + spec = request.new_spec() + + dc = spec.new_deviceChange() + dc.Operation = "add" + dc.FileOperation = "create" + + hd = VI.ns0.VirtualDisk_Def("hd").pyclass() + hd.Key = -100 + hd.UnitNumber = int(disk_id) + hd.CapacityInKB = int(vm_disk[cnf_disk]['size_gb']) * 1024 * 1024 + hd.ControllerKey = 1000 + + # module.fail_json(msg="peos : %s" % vm_disk[cnf_disk]) + backing = VI.ns0.VirtualDiskFlatVer2BackingInfo_Def("backing").pyclass() + backing.FileName = "[%s]" % vm_disk[cnf_disk]['datastore'] + backing.DiskMode = "persistent" + backing.Split = False + backing.WriteThrough = False + backing.ThinProvisioned = False + backing.EagerlyScrub = False + hd.Backing = backing + + dc.Device = hd + + spec.DeviceChange = [dc] + request.set_element_spec(spec) + + ret = vsphere_client._proxy.ReconfigVM_Task(request)._returnval + + # Wait for the task to finish + task = VITask(ret, vsphere_client) + status = task.wait_for_state([task.STATE_SUCCESS, + task.STATE_ERROR]) + + if status == task.STATE_SUCCESS: + changed = True + changes[cnf_disk] = 
vm_disk[cnf_disk] + elif status == task.STATE_ERROR: + module.fail_json( + msg="Error reconfiguring vm: %s, [%s]" % ( + task.get_error_message(), + vm_disk[cnf_disk])) + return changed, changes + + def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool, cluster_name, guest, vm_extra_config, vm_hardware, vm_disk, vm_nic, state, force): spec = None changed = False changes = {} - request = VI.ReconfigVM_TaskRequestMsg() + request = None shutdown = False poweron = vm.is_powered_on() @@ -714,6 +778,10 @@ def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool, cluster_name cpuHotAddEnabled = bool(vm.properties.config.cpuHotAddEnabled) cpuHotRemoveEnabled = bool(vm.properties.config.cpuHotRemoveEnabled) + changed, changes = update_disks(vsphere_client, vm, + module, vm_disk, changes) + request = VI.ReconfigVM_TaskRequestMsg() + # Change Memory if 'memory_mb' in vm_hardware: From 8c9a9c0802f31a44a3fca401b027e9abea99da61 Mon Sep 17 00:00:00 2001 From: Mike Date: Thu, 1 Oct 2015 17:38:46 +0100 Subject: [PATCH 015/200] update_disks(): added origins of the code. --- cloud/vmware/vsphere_guest.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index 85e184cc318..f13eebbf2df 100644 --- a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -701,7 +701,8 @@ def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, mo msg="Could not clone selected machine: %s" % e ) - +# example from https://github.com/kalazzerx/pysphere/blob/master/examples/pysphere_create_disk_and_add_to_vm.py +# was used. 
def update_disks(vsphere_client, vm, module, vm_disk, changes): request = VI.ReconfigVM_TaskRequestMsg() changed = False From e96549c95d44120d885bcacfeacd3d6a56fce579 Mon Sep 17 00:00:00 2001 From: Andrew Pashkin Date: Fri, 2 Oct 2015 00:44:52 +0300 Subject: [PATCH 016/200] Harden matching running containers by "command" in the Docker module Before this patch: - Command was matched if 'Command' field of docker-py representation of Docker container ends with 'command' passed to Ansible docker module by user. - That can give false positives and false negatives. - For example: a) If 'command' was set up with more than one space, like 'command=sleep 123', it would never be matched again with the container(s) launched by this task. Because after launching, command would be normalized and appear, in docker-py API call, just as 'sleep 123' - with one space. This is a false negative case. b) If 'entrypoint + command = command', for example 'sleep + 123 = sleep 123', the module would give a false positive match. This patch fixes it, by making matching more explicit - against 'Config'->'Cmd' field of 'docker inspect' output, provided by docker-py API and with proper normalization of user input by splitting it to tokens with 'shlex.split()'.
--- cloud/docker/docker.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 0ab564208ba..cefae3db3df 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -1314,8 +1314,8 @@ class DockerManager(object): """ command = self.module.params.get('command') - if command: - command = command.strip() + if command is not None: + command = shlex.split(command) name = self.module.params.get('name') if name and not name.startswith('/'): name = '/' + name @@ -1342,13 +1342,10 @@ class DockerManager(object): details = _docker_id_quirk(details) running_image = normalize_image(details['Config']['Image']) - running_command = container['Command'].strip() image_matches = running_image in repo_tags - # if a container has an entrypoint, `command` will actually equal - # '{} {}'.format(entrypoint, command) - command_matches = (not command or running_command.endswith(command)) + command_matches = command == details['Config']['Cmd'] matches = image_matches and command_matches From 9b04ca55f1526b57f90f5cf12e30c3920753480a Mon Sep 17 00:00:00 2001 From: Michael Fenn Date: Sat, 3 Oct 2015 14:31:22 -0400 Subject: [PATCH 017/200] Support cloning VMs into a specific VM folder The pysphere VIVirtualMachine.clone() method supports specifying a VM folder to place the VM in after the clone has completed. This exposes that functionality to playbooks. Also documents that creating VMs could always place VMs in a specific folder. 
--- cloud/vmware/vsphere_guest.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index b8adb7930c3..7c3513a8d27 100644 --- a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -170,6 +170,7 @@ EXAMPLES = ''' vcpu.hotadd: yes mem.hotadd: yes notes: This is a test VM + folder: MyFolder vm_disk: disk1: size_gb: 10 @@ -241,6 +242,8 @@ EXAMPLES = ''' template_src: centosTemplate cluster: MainCluster resource_pool: "/Resources" + vm_extra_config: + folder: MyFolder # Task to gather facts from a vSphere cluster only if the system is a VMWare guest @@ -597,7 +600,7 @@ def vmdisk_id(vm, current_datastore_name): return id_list -def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, module, cluster_name, snapshot_to_clone, power_on_after_clone): +def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, module, cluster_name, snapshot_to_clone, power_on_after_clone, vm_extra_config): vmTemplate = vsphere_client.get_vm_by_name(template_src) vmTarget = None @@ -689,6 +692,10 @@ def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, mo cloneArgs["linked"] = True cloneArgs["snapshot"] = snapshot_to_clone + if vm_extra_config.get("folder") is not None: + # if a folder is specified, clone the VM into it + cloneArgs["folder"] = vm_extra_config.get("folder") + vmTemplate.clone(guest, **cloneArgs) changed = True else: @@ -1455,7 +1462,8 @@ def main(): module=module, cluster_name=cluster, snapshot_to_clone=snapshot_to_clone, - power_on_after_clone=power_on_after_clone + power_on_after_clone=power_on_after_clone, + vm_extra_config=vm_extra_config ) if state in ['restarted', 'reconfigured']: From cee7e928fc2cb911480aae0c3ed53501034f4611 Mon Sep 17 00:00:00 2001 From: Andrew Pashkin Date: Fri, 2 Oct 2015 01:09:08 +0300 Subject: [PATCH 018/200] Add 'entrypoint' parameter to Docker module --- cloud/docker/docker.py 
| 34 +++++++++++++++++++++++++++++++++- 1 file changed, 33 insertions(+), 1 deletion(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index cefae3db3df..3bc42629709 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -46,6 +46,14 @@ options: default: missing choices: [ "missing", "always" ] version_added: "1.9" + entrypoint: + description: + - Corresponds to ``--entrypoint`` option of ``docker run`` command and + ``ENTRYPOINT`` directive of Dockerfile. + Used to match and launch containers. + default: null + required: false + version_added: "2.0" command: description: - Command used to match and launch containers. @@ -1043,6 +1051,21 @@ class DockerManager(object): differing.append(container) continue + # ENTRYPOINT + + expected_entrypoint = self.module.params.get('entrypoint') + if expected_entrypoint: + expected_entrypoint = shlex.split(expected_entrypoint) + actual_entrypoint = container["Config"]["Entrypoint"] + + if actual_entrypoint != expected_entrypoint: + self.reload_reasons.append( + 'entrypoint ({0} => {1})' + .format(actual_entrypoint, expected_entrypoint) + ) + differing.append(container) + continue + # COMMAND expected_command = self.module.params.get('command') @@ -1313,6 +1336,9 @@ class DockerManager(object): Return any matching containers that are already present. 
""" + entrypoint = self.module.params.get('entrypoint') + if entrypoint is not None: + entrypoint = shlex.split(entrypoint) command = self.module.params.get('command') if command is not None: command = shlex.split(command) @@ -1346,8 +1372,12 @@ class DockerManager(object): image_matches = running_image in repo_tags command_matches = command == details['Config']['Cmd'] + entrypoint_matches = ( + entrypoint == details['Config']['Entrypoint'] + ) - matches = image_matches and command_matches + matches = (image_matches and command_matches and + entrypoint_matches) if matches: if not details: @@ -1407,6 +1437,7 @@ class DockerManager(object): api_version = self.client.version()['ApiVersion'] params = {'image': self.module.params.get('image'), + 'entrypoint': self.module.params.get('entrypoint'), 'command': self.module.params.get('command'), 'ports': self.exposed_ports, 'volumes': self.volumes, @@ -1619,6 +1650,7 @@ def main(): count = dict(default=1), image = dict(required=True), pull = dict(required=False, default='missing', choices=['missing', 'always']), + entrypoint = dict(required=False, default=None, type='str'), command = dict(required=False, default=None), expose = dict(required=False, default=None, type='list'), ports = dict(required=False, default=None, type='list'), From d0b30dd86de218dc27449d42134b2487ab0b3880 Mon Sep 17 00:00:00 2001 From: Kai Webber Date: Tue, 6 Oct 2015 20:26:10 +0300 Subject: [PATCH 019/200] Added launch group support for ec2 module --- cloud/amazon/ec2.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index 6572a9286f4..aed6d757a68 100644 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -247,6 +247,13 @@ options: required: false default: null aliases: ['network_interface'] + spot_launch_group: + version_added: "2.0" + description: + - Launch group for spot request, see U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/how-spot-instances-work.html#spot-launch-group) + 
required: false + default: null + aliases: [] author: - "Tim Gerla (@tgerla)" @@ -358,6 +365,7 @@ EXAMPLES = ''' wait: yes vpc_subnet_id: subnet-29e63245 assign_public_ip: yes + spot_launch_group: report_generators # Examples using pre-existing network interfaces - ec2: @@ -858,6 +866,7 @@ def create_instances(module, ec2, vpc, override_count=None): source_dest_check = module.boolean(module.params.get('source_dest_check')) termination_protection = module.boolean(module.params.get('termination_protection')) network_interfaces = module.params.get('network_interfaces') + spot_launch_group = module.params.get('spot_launch_group') # group_id and group_name are exclusive of each other if group_id and group_name: @@ -1040,6 +1049,9 @@ def create_instances(module, ec2, vpc, override_count=None): module.fail_json( msg="placement_group parameter requires Boto version 2.3.0 or higher.") + if spot_launch_group and isinstance(spot_launch_group, basestring): + params['launch_group'] = spot_launch_group + params.update(dict( count = count_remaining, type = spot_type, @@ -1304,6 +1316,7 @@ def main(): instance_type = dict(aliases=['type']), spot_price = dict(), spot_type = dict(default='one-time', choices=["one-time", "persistent"]), + spot_launch_group = dict(), image = dict(), kernel = dict(), count = dict(type='int', default='1'), From 5f914e854b6515515c0847ab2266ee9357853848 Mon Sep 17 00:00:00 2001 From: Gilad Peleg Date: Wed, 7 Oct 2015 14:49:05 +0300 Subject: [PATCH 020/200] Add state=running on some ec2 examples `state=running` was missing in some of the ec2 module examples --- cloud/amazon/ec2.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index 6572a9286f4..608a05fef54 100644 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -481,6 +481,7 @@ EXAMPLES = ''' # - ec2: + state: running key_name: mykey instance_type: c1.medium image: ami-40603AD1 @@ -498,6 +499,7 @@ EXAMPLES = ''' # - ec2: + state: running key_name: mykey 
instance_type: c1.medium image: ami-40603AD1 From 686ceb81d7bf5c2e569ffee2cc234dd14d68fd38 Mon Sep 17 00:00:00 2001 From: Adrian Bridgett Date: Thu, 8 Oct 2015 16:25:39 +0100 Subject: [PATCH 021/200] add idle_timeout support --- cloud/amazon/ec2_elb_lb.py | 32 ++++++++++++++++++++++++++++---- 1 file changed, 28 insertions(+), 4 deletions(-) diff --git a/cloud/amazon/ec2_elb_lb.py b/cloud/amazon/ec2_elb_lb.py index 37ba3fc1eb6..872ee2bedc7 100644 --- a/cloud/amazon/ec2_elb_lb.py +++ b/cloud/amazon/ec2_elb_lb.py @@ -102,6 +102,12 @@ options: required: false aliases: [] version_added: "1.8" + idle_timeout: + description: + - ELB connections from clients and to servers are timed out after this amount of time + required: false + aliases: [] + version_added: "2.0" cross_az_load_balancing: description: - Distribute load across all configured Availability Zones @@ -232,13 +238,14 @@ EXAMPLES = """ load_balancer_port: 80 instance_port: 80 -# Create an ELB with connection draining and cross availability +# Create an ELB with connection draining, increased idle timeout and cross availability # zone load balancing - local_action: module: ec2_elb_lb name: "New ELB" state: present connection_draining_timeout: 60 + idle_timeout: 300 cross_az_load_balancing: "yes" region: us-east-1 zones: @@ -305,7 +312,7 @@ class ElbManager(object): zones=None, purge_zones=None, security_group_ids=None, health_check=None, subnets=None, purge_subnets=None, scheme="internet-facing", connection_draining_timeout=None, - cross_az_load_balancing=None, + idle_timeout=None, cross_az_load_balancing=None, stickiness=None, region=None, **aws_connect_params): self.module = module @@ -320,6 +327,7 @@ class ElbManager(object): self.purge_subnets = purge_subnets self.scheme = scheme self.connection_draining_timeout = connection_draining_timeout + self.idle_timeout = idle_timeout self.cross_az_load_balancing = cross_az_load_balancing self.stickiness = stickiness @@ -347,6 +355,8 @@ class ElbManager(object): # 
set them to avoid errors if self._check_attribute_support('connection_draining'): self._set_connection_draining_timeout() + if self._check_attribute_support('connecting_settings'): + self._set_idle_timeout() if self._check_attribute_support('cross_zone_load_balancing'): self._set_cross_az_load_balancing() # add sitcky options @@ -442,6 +452,9 @@ class ElbManager(object): if self._check_attribute_support('connection_draining'): info['connection_draining_timeout'] = self.elb_conn.get_lb_attribute(self.name, 'ConnectionDraining').timeout + if self._check_attribute_support('connecting_settings'): + info['idle_timeout'] = self.elb_conn.get_lb_attribute(self.name, 'ConnectingSettings').idle_timeout + if self._check_attribute_support('cross_zone_load_balancing'): is_cross_az_lb_enabled = self.elb_conn.get_lb_attribute(self.name, 'CrossZoneLoadBalancing') if is_cross_az_lb_enabled: @@ -705,6 +718,12 @@ class ElbManager(object): attributes.connection_draining.enabled = False self.elb_conn.modify_lb_attribute(self.name, 'ConnectionDraining', attributes.connection_draining) + def _set_idle_timeout(self): + attributes = self.elb.get_attributes() + if self.idle_timeout is not None: + attributes.connecting_settings.idle_timeout = self.idle_timeout + self.elb_conn.modify_lb_attribute(self.name, 'ConnectingSettings', attributes.connecting_settings) + def _policy_name(self, policy_type): return __file__.split('/')[-1].replace('_', '-') + '-' + policy_type @@ -829,6 +848,7 @@ def main(): purge_subnets={'default': False, 'required': False, 'type': 'bool'}, scheme={'default': 'internet-facing', 'required': False}, connection_draining_timeout={'default': None, 'required': False}, + idle_timeout={'default': None, 'required': False}, cross_az_load_balancing={'default': None, 'required': False}, stickiness={'default': None, 'required': False, 'type': 'dict'} ) @@ -859,6 +879,7 @@ def main(): purge_subnets = module.params['purge_subnets'] scheme = module.params['scheme'] 
connection_draining_timeout = module.params['connection_draining_timeout'] + idle_timeout = module.params['idle_timeout'] cross_az_load_balancing = module.params['cross_az_load_balancing'] stickiness = module.params['stickiness'] @@ -886,8 +907,8 @@ def main(): elb_man = ElbManager(module, name, listeners, purge_listeners, zones, purge_zones, security_group_ids, health_check, subnets, purge_subnets, scheme, - connection_draining_timeout, cross_az_load_balancing, - stickiness, + connection_draining_timeout, idle_timeout, + cross_az_load_balancing, stickiness, region=region, **aws_connect_params) # check for unsupported attributes for this version of boto @@ -897,6 +918,9 @@ def main(): if connection_draining_timeout and not elb_man._check_attribute_support('connection_draining'): module.fail_json(msg="You must install boto >= 2.28.0 to use the connection_draining_timeout attribute") + if idle_timeout and not elb_man._check_attribute_support('connecting_settings'): + module.fail_json(msg="You must install boto >= 2.33.0 to use the idle_timeout attribute") + if state == 'present': elb_man.ensure_ok() elif state == 'absent': From 81e9d1bde56f89edb2ca370a3c9e6838cff50072 Mon Sep 17 00:00:00 2001 From: Aaron Boushley Date: Fri, 9 Oct 2015 16:11:44 -0700 Subject: [PATCH 022/200] Fix issue with comparing versions improperly. This allows old versions of docker api to function. 
--- cloud/docker/docker.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 0ab564208ba..ab71eb25b66 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -1088,7 +1088,7 @@ class DockerManager(object): self.module.fail_json(msg=str(e)) #For v1.19 API and above use HostConfig, otherwise use Config - if api_version >= 1.19: + if docker.utils.compare_version('1.19', api_version) >= 0: actual_mem = container['HostConfig']['Memory'] else: actual_mem = container['Config']['Memory'] @@ -1427,7 +1427,7 @@ class DockerManager(object): params['host_config'] = self.create_host_config() #For v1.19 API and above use HostConfig, otherwise use Config - if api_version < 1.19: + if docker.utils.compare_version('1.19', api_version) < 0: params['mem_limit'] = mem_limit else: params['host_config']['Memory'] = mem_limit From cc821492d315d1cb92494edf93942017f61dfee8 Mon Sep 17 00:00:00 2001 From: Adrian Bridgett Date: Mon, 12 Oct 2015 17:45:37 +0100 Subject: [PATCH 023/200] remove unneeded aliases --- cloud/amazon/ec2_elb_lb.py | 1 - 1 file changed, 1 deletion(-) diff --git a/cloud/amazon/ec2_elb_lb.py b/cloud/amazon/ec2_elb_lb.py index 872ee2bedc7..9e6ef2ce51a 100644 --- a/cloud/amazon/ec2_elb_lb.py +++ b/cloud/amazon/ec2_elb_lb.py @@ -106,7 +106,6 @@ options: description: - ELB connections from clients and to servers are timed out after this amount of time required: false - aliases: [] version_added: "2.0" cross_az_load_balancing: description: From 6fc58855b931a29c2ec94efec1eacde2fd20a07d Mon Sep 17 00:00:00 2001 From: whiter Date: Thu, 15 Oct 2015 13:21:05 +1100 Subject: [PATCH 024/200] Allow iam_policy to maintain idempotence if the role referenced has been removed --- cloud/amazon/iam_policy.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/cloud/amazon/iam_policy.py b/cloud/amazon/iam_policy.py index 0d2ed506457..eeab1a7acd5 100644 --- a/cloud/amazon/iam_policy.py +++
b/cloud/amazon/iam_policy.py @@ -183,6 +183,14 @@ def role_action(module, iam, name, policy_name, skip, pdoc, state): current_policies = [cp for cp in iam.list_role_policies(name). list_role_policies_result. policy_names] + except boto.exception.BotoServerError as e: + if e.error_code == "NoSuchEntity": + # Role doesn't exist so it's safe to assume the policy doesn't either + module.exit_json(changed=False) + else: + module.fail_json(e.message) + + try: for pol in current_policies: if urllib.unquote(iam.get_role_policy(name, pol). get_role_policy_result.policy_document) == pdoc: From 7b9c326ca635b06d617923caf67570c5ef027565 Mon Sep 17 00:00:00 2001 From: Evan Carter Date: Thu, 15 Oct 2015 15:06:32 -0400 Subject: [PATCH 025/200] add documentation stating that JSON files can be loaded with include_vars --- utilities/logic/include_vars.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utilities/logic/include_vars.py b/utilities/logic/include_vars.py index a6b2b5b152f..fb55ab5515f 100644 --- a/utilities/logic/include_vars.py +++ b/utilities/logic/include_vars.py @@ -14,7 +14,7 @@ author: "Benno Joy (@bennojoy)" module: include_vars short_description: Load variables from files, dynamically within a task. description: - - Loads variables from a YAML file dynamically during task runtime. It can work with conditionals, or use host specific variables to determine the path name to load from. + - Loads variables from a YAML/JSON file dynamically during task runtime. It can work with conditionals, or use host specific variables to determine the path name to load from. 
options: free-form: description: From 3016d360f4accac987938f72c7d5c82acba3ae10 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 19 Oct 2015 16:38:15 -0400 Subject: [PATCH 026/200] better handling of checkmode for solaris fixes #2296 --- system/user.py | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/system/user.py b/system/user.py index 499228953b2..c2e4956f897 100755 --- a/system/user.py +++ b/system/user.py @@ -1352,20 +1352,21 @@ class SunOS(User): cmd.append('-s') cmd.append(self.shell) - if self.module.check_mode: - return (0, '', '') - else: - # modify the user if cmd will do anything - if cmd_len != len(cmd): + # modify the user if cmd will do anything + if cmd_len != len(cmd): + (rc, out, err) = (0, '', '') + if not self.module.check_mode: cmd.append(self.name) (rc, out, err) = self.execute_command(cmd) if rc is not None and rc != 0: self.module.fail_json(name=self.name, msg=err, rc=rc) - else: - (rc, out, err) = (None, '', '') + else: + (rc, out, err) = (None, '', '') - # we have to set the password by editing the /etc/shadow file - if self.update_password == 'always' and self.password is not None and info[1] != self.password: + # we have to set the password by editing the /etc/shadow file + if self.update_password == 'always' and self.password is not None and info[1] != self.password: + (rc, out, err) = (0, '', '') + if not self.module.check_mode: try: lines = [] for line in open(self.SHADOWFILE, 'rb').readlines(): @@ -1382,7 +1383,7 @@ class SunOS(User): except Exception, err: self.module.fail_json(msg="failed to update users password: %s" % str(err)) - return (rc, out, err) + return (rc, out, err) # =========================================== class DarwinUser(User): From 735eefb2ca44419e368e67b45d40787927238f37 Mon Sep 17 00:00:00 2001 From: Kevin Falcone Date: Wed, 21 Oct 2015 16:43:50 -0400 Subject: [PATCH 027/200] Mark this as a string so it is rendered in the docs When this was treated as a boolean, 
sphinx was leaving the Default column on http://docs.ansible.com/ansible/ec2_module.html blank, implying it would use AWS's default. In reality, it passes False, which overrides the defaults at AWS (it's possible to boot an instance which AWS claims will always have EBS optimization without it because of this silently passed False). --- cloud/amazon/ec2.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index ed36b855480..256c16decfd 100644 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -225,7 +225,7 @@ options: description: - whether instance is using optimized EBS volumes, see U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html) required: false - default: false + default: 'false' exact_count: version_added: "1.5" description: From acdde856c5908f61dc4f6d75e8a9d42bc51c3ee8 Mon Sep 17 00:00:00 2001 From: Lee Hardy Date: Thu, 22 Oct 2015 13:45:50 +0100 Subject: [PATCH 028/200] - mysql: add user_anonymous parameter, which interacts with anonymous users - mysql; add host_all parameter, which forces iteration over all 'user'@... matches --- database/mysql/mysql_user.py | 166 ++++++++++++++++++++++++----------- 1 file changed, 113 insertions(+), 53 deletions(-) diff --git a/database/mysql/mysql_user.py b/database/mysql/mysql_user.py index 1ea54b41b3a..acf093f8490 100644 --- a/database/mysql/mysql_user.py +++ b/database/mysql/mysql_user.py @@ -30,6 +30,13 @@ options: description: - name of the user (role) to add or remove required: true + user_anonymous: + description: + - username is to be ignored and anonymous users with no username + handled + required: false + choices: [ "yes", "no" ] + default: no password: description: - set the user's password @@ -40,6 +47,14 @@ options: - the 'host' part of the MySQL username required: false default: localhost + host_all: + description: + - override the host option, making ansible apply changes to + all hostnames for a given user. 
This option cannot be used + when creating users + required: false + choices: [ "yes", "no" ] + default: "no" login_user: description: - The username used to authenticate with @@ -133,9 +148,12 @@ EXAMPLES = """ # Modify user Bob to require SSL connections. Note that REQUIRESSL is a special privilege that should only apply to *.* by itself. - mysql_user: name=bob append_privs=true priv=*.*:REQUIRESSL state=present -# Ensure no user named 'sally' exists, also passing in the auth credentials. +# Ensure no user named 'sally'@'localhost' exists, also passing in the auth credentials. - mysql_user: login_user=root login_password=123456 name=sally state=absent +# Ensure no user named 'sally' exists at all +- mysql_user: name=sally host_all=yes state=absent + # Specify grants composed of more than one word - mysql_user: name=replication password=12345 priv=*.*:"REPLICATION CLIENT" state=present @@ -206,71 +224,104 @@ def connect(module, login_user=None, login_password=None, config_file=''): db_connection = MySQLdb.connect(**config) return db_connection.cursor() -def user_exists(cursor, user, host): - cursor.execute("SELECT count(*) FROM user WHERE user = %s AND host = %s", (user,host)) +def user_exists(cursor, user, host, host_all): + if host_all: + cursor.execute("SELECT count(*) FROM user WHERE user = %s AND host = %s", (user,host)) + else: + cursor.execute("SELECT count(*) FROM user WHERE user = %s AND host = %s", (user,host)) + count = cursor.fetchone() return count[0] > 0 -def user_add(cursor, user, host, password, new_priv): +def user_add(cursor, user, host, host_all, password, new_priv): + # we cannot create users without a proper hostname + if host_all: + return False + cursor.execute("CREATE USER %s@%s IDENTIFIED BY %s", (user,host,password)) if new_priv is not None: for db_table, priv in new_priv.iteritems(): privileges_grant(cursor, user,host,db_table,priv) return True -def user_mod(cursor, user, host, password, new_priv, append_privs): +def user_mod(cursor, 
user, host, host_all, password, new_priv, append_privs): changed = False grant_option = False - # Handle passwords - if password is not None: - cursor.execute("SELECT password FROM user WHERE user = %s AND host = %s", (user,host)) - current_pass_hash = cursor.fetchone() - cursor.execute("SELECT PASSWORD(%s)", (password,)) - new_pass_hash = cursor.fetchone() - if current_pass_hash[0] != new_pass_hash[0]: - cursor.execute("SET PASSWORD FOR %s@%s = PASSWORD(%s)", (user,host,password)) - changed = True - - # Handle privileges - if new_priv is not None: - curr_priv = privileges_get(cursor, user,host) - - # If the user has privileges on a db.table that doesn't appear at all in - # the new specification, then revoke all privileges on it. - for db_table, priv in curr_priv.iteritems(): - # If the user has the GRANT OPTION on a db.table, revoke it first. - if "GRANT" in priv: - grant_option = True - if db_table not in new_priv: - if user != "root" and "PROXY" not in priv and not append_privs: - privileges_revoke(cursor, user,host,db_table,priv,grant_option) - changed = True - - # If the user doesn't currently have any privileges on a db.table, then - # we can perform a straight grant operation. 
- for db_table, priv in new_priv.iteritems(): - if db_table not in curr_priv: - privileges_grant(cursor, user,host,db_table,priv) + # to simplify code, if we have a specific host and no host_all, we create + # a list with just host and loop over that + if host_all: + hostnames = user_get_hostnames(cursor, user) + else: + hostnames = [host] + + for host in hostnames: + # Handle passwords + if password is not None: + cursor.execute("SELECT password FROM user WHERE user = %s AND host = %s", (user,host)) + current_pass_hash = cursor.fetchone() + cursor.execute("SELECT PASSWORD(%s)", (password,)) + new_pass_hash = cursor.fetchone() + if current_pass_hash[0] != new_pass_hash[0]: + cursor.execute("SET PASSWORD FOR %s@%s = PASSWORD(%s)", (user,host,password)) changed = True - # If the db.table specification exists in both the user's current privileges - # and in the new privileges, then we need to see if there's a difference. - db_table_intersect = set(new_priv.keys()) & set(curr_priv.keys()) - for db_table in db_table_intersect: - priv_diff = set(new_priv[db_table]) ^ set(curr_priv[db_table]) - if (len(priv_diff) > 0): - if not append_privs: - privileges_revoke(cursor, user,host,db_table,curr_priv[db_table],grant_option) - privileges_grant(cursor, user,host,db_table,new_priv[db_table]) - changed = True + # Handle privileges + if new_priv is not None: + curr_priv = privileges_get(cursor, user,host) + + # If the user has privileges on a db.table that doesn't appear at all in + # the new specification, then revoke all privileges on it. + for db_table, priv in curr_priv.iteritems(): + # If the user has the GRANT OPTION on a db.table, revoke it first. 
+ if "GRANT" in priv: + grant_option = True + if db_table not in new_priv: + if user != "root" and "PROXY" not in priv and not append_privs: + privileges_revoke(cursor, user,host,db_table,priv,grant_option) + changed = True + + # If the user doesn't currently have any privileges on a db.table, then + # we can perform a straight grant operation. + for db_table, priv in new_priv.iteritems(): + if db_table not in curr_priv: + privileges_grant(cursor, user,host,db_table,priv) + changed = True + + # If the db.table specification exists in both the user's current privileges + # and in the new privileges, then we need to see if there's a difference. + db_table_intersect = set(new_priv.keys()) & set(curr_priv.keys()) + for db_table in db_table_intersect: + priv_diff = set(new_priv[db_table]) ^ set(curr_priv[db_table]) + if (len(priv_diff) > 0): + if not append_privs: + privileges_revoke(cursor, user,host,db_table,curr_priv[db_table],grant_option) + privileges_grant(cursor, user,host,db_table,new_priv[db_table]) + changed = True return changed -def user_delete(cursor, user, host): - cursor.execute("DROP USER %s@%s", (user, host)) +def user_delete(cursor, user, host, host_all): + if host_all: + hostnames = user_get_hostnames(cursor, user) + + for hostname in hostnames: + cursor.execute("DROP USER %s@%s", (user, hostname)) + else: + cursor.execute("DROP USER %s@%s", (user, host)) + return True +def user_get_hostnames(cursor, user): + cursor.execute("SELECT Host FROM mysql.user WHERE user = %s", user) + hostnames_raw = cursor.fetchall() + hostnames = [] + + for hostname_raw in hostnames_raw: + hostnames.append(hostname_raw[0]) + + return hostnames + def privileges_get(cursor, user,host): """ MySQL doesn't have a better method of getting privileges aside from the SHOW GRANTS query syntax, which requires us to then parse the returned string. 
@@ -387,8 +438,10 @@ def main(): login_port=dict(default=3306, type='int'), login_unix_socket=dict(default=None), user=dict(required=True, aliases=['name']), + user_anonymous=dict(type="bool", default="no"), password=dict(default=None, no_log=True), host=dict(default="localhost"), + host_all=dict(type="bool", default="no"), state=dict(default="present", choices=["absent", "present"]), priv=dict(default=None), append_privs=dict(default=False, type='bool'), @@ -400,8 +453,10 @@ def main(): login_user = module.params["login_user"] login_password = module.params["login_password"] user = module.params["user"] + user_anonymous = module.params["user_anonymous"] password = module.params["password"] host = module.params["host"].lower() + host_all = module.params["host_all"] state = module.params["state"] priv = module.params["priv"] check_implicit_admin = module.params['check_implicit_admin'] @@ -409,6 +464,9 @@ def main(): append_privs = module.boolean(module.params["append_privs"]) update_password = module.params['update_password'] + if user_anonymous: + user = '' + config_file = os.path.expanduser(os.path.expandvars(config_file)) if not mysqldb_found: module.fail_json(msg="the python mysqldb module is required") @@ -433,25 +491,27 @@ def main(): module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or ~/.my.cnf has the credentials. 
Exception message: %s" % e) if state == "present": - if user_exists(cursor, user, host): + if user_exists(cursor, user, host, host_all): try: if update_password == 'always': - changed = user_mod(cursor, user, host, password, priv, append_privs) + changed = user_mod(cursor, user, host, host_all, password, priv, append_privs) else: - changed = user_mod(cursor, user, host, None, priv, append_privs) + changed = user_mod(cursor, user, host, host_all, None, priv, append_privs) except (SQLParseError, InvalidPrivsError, MySQLdb.Error), e: module.fail_json(msg=str(e)) else: if password is None: module.fail_json(msg="password parameter required when adding a user") + if host_all: + module.fail_json(msg="host_all parameter cannot be used when adding a user") try: - changed = user_add(cursor, user, host, password, priv) + changed = user_add(cursor, user, host, host_all, password, priv) except (SQLParseError, InvalidPrivsError, MySQLdb.Error), e: module.fail_json(msg=str(e)) elif state == "absent": - if user_exists(cursor, user, host): - changed = user_delete(cursor, user, host) + if user_exists(cursor, user, host, host_all): + changed = user_delete(cursor, user, host, host_all) else: changed = False module.exit_json(changed=changed, user=user) From fb10161510996949e990e2ea7b84cc98fc9989df Mon Sep 17 00:00:00 2001 From: Ryan Sydnor Date: Tue, 13 Oct 2015 21:49:54 -0400 Subject: [PATCH 029/200] Add capability for stat module to use more hash algorithms Specifically, the stat module now has a checksum_algorithm parameter. This lets the module utilize one of the hash algorithms available on the host to return the checksum of the file. This change is backwards compatible. The checksum_algorithm defaults to sha1 and still returns its result to the stat.checksum property. 
--- files/stat.py | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/files/stat.py b/files/stat.py index 8f2bd289bc0..61c77a2ef31 100644 --- a/files/stat.py +++ b/files/stat.py @@ -42,11 +42,18 @@ options: aliases: [] get_checksum: description: - - Whether to return a checksum of the file (currently sha1) + - Whether to return a checksum of the file (default sha1) required: false default: yes aliases: [] version_added: "1.8" + checksum_algorithm: + description: + - Algorithm to determine checksum of file. Will throw an error if the host is unable to use specified algorithm. + required: false + choices: [ 'sha1', 'sha224', 'sha256', 'sha384', 'sha512' ] + default: sha1 + version_added: "2.0" author: "Bruce Pennypacker (@bpennypacker)" ''' @@ -84,6 +91,9 @@ EXAMPLES = ''' # Don't do md5 checksum - stat: path=/path/to/myhugefile get_md5=no + +# Use sha256 to calculate checksum +- stat: path=/path/to/something checksum_algorithm=sha256 ''' RETURN = ''' @@ -254,7 +264,7 @@ stat: sample: f88fa92d8cf2eeecf4c0a50ccc96d0c0 checksum: description: hash of the path - returned: success, path exists and user can read stats and path supports hashing + returned: success, path exists, user can read stats, path supports hashing and supplied checksum algorithm is available type: string sample: 50ba294cdf28c0d5bcde25708df53346825a429f pw_name: @@ -281,7 +291,8 @@ def main(): path = dict(required=True), follow = dict(default='no', type='bool'), get_md5 = dict(default='yes', type='bool'), - get_checksum = dict(default='yes', type='bool') + get_checksum = dict(default='yes', type='bool'), + checksum_algorithm = dict(default='sha1', type='str', choices=['sha1', 'sha224', 'sha256', 'sha384', 'sha512']) ), supports_check_mode = True ) @@ -291,6 +302,7 @@ def main(): follow = module.params.get('follow') get_md5 = module.params.get('get_md5') get_checksum = module.params.get('get_checksum') + checksum_algorithm = module.params.get('checksum_algorithm') 
try: if follow: @@ -351,8 +363,7 @@ def main(): d['md5'] = None if S_ISREG(mode) and get_checksum and os.access(path,os.R_OK): - d['checksum'] = module.sha1(path) - + d['checksum'] = module.digest_from_file(path, checksum_algorithm) try: pw = pwd.getpwuid(st.st_uid) @@ -370,4 +381,4 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() +main() \ No newline at end of file From a2fe8dba68ae83903c2b84d5099f57af5451a17e Mon Sep 17 00:00:00 2001 From: Lars Kellogg-Stedman Date: Fri, 23 Oct 2015 09:44:07 -0400 Subject: [PATCH 030/200] allow os_port to accept a list of security groups with this commit, the `security_groups` attribute for `os_port` will accept either a common-delimited string or ` YAML list. That is, either this: - os_port: [...] security_groups: group1,group2 Or this: - os_port: [...] security_groups: - group1 - group2 --- cloud/openstack/os_port.py | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/cloud/openstack/os_port.py b/cloud/openstack/os_port.py index 2ee1ab568a6..d218e938b10 100644 --- a/cloud/openstack/os_port.py +++ b/cloud/openstack/os_port.py @@ -61,8 +61,7 @@ options: security_groups: description: - Security group(s) ID(s) or name(s) associated with the port (comma - separated for multiple security groups - no spaces between comma(s) - or YAML list). 
+ separated string or YAML list) required: false default: None no_security_groups: @@ -220,7 +219,7 @@ def _needs_update(module, port, cloud): 'device_id'] compare_dict = ['allowed_address_pairs', 'extra_dhcp_opt'] - compare_comma_separated_list = ['security_groups'] + compare_list = ['security_groups'] for key in compare_simple: if module.params[key] is not None and module.params[key] != port[key]: @@ -229,7 +228,7 @@ def _needs_update(module, port, cloud): if module.params[key] is not None and cmp(module.params[key], port[key]) != 0: return True - for key in compare_comma_separated_list: + for key in compare_list: if module.params[key] is not None and (set(module.params[key]) != set(port[key])): return True @@ -309,7 +308,7 @@ def main(): fixed_ips=dict(default=None), admin_state_up=dict(default=None), mac_address=dict(default=None), - security_groups=dict(default=None), + security_groups=dict(default=None, type='list'), no_security_groups=dict(default=False, type='bool'), allowed_address_pairs=dict(default=None), extra_dhcp_opt=dict(default=None), @@ -336,13 +335,11 @@ def main(): try: cloud = shade.openstack_cloud(**module.params) if module.params['security_groups']: - if type(module.params['security_groups']) == str: - module.params['security_groups'] = module.params[ - 'security_groups'].split(',') # translate security_groups to UUID's if names where provided - module.params['security_groups'] = map( - lambda v: get_security_group_id(module, cloud, v), - module.params['security_groups']) + module.params['security_groups'] = [ + get_security_group_id(module, cloud, v) + for v in module.params['security_groups'] + ] port = None network_id = None From d82460a3728b9208a380ddb9698193722cb4eec8 Mon Sep 17 00:00:00 2001 From: Lars Kellogg-Stedman Date: Fri, 23 Oct 2015 13:32:37 -0400 Subject: [PATCH 031/200] make os_router return a top level 'id' key make os_router return a top-level 'id' key, much like other os_* resources. 
--- cloud/openstack/os_router.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cloud/openstack/os_router.py b/cloud/openstack/os_router.py index d8d547f5f1f..d48ed0417f1 100644 --- a/cloud/openstack/os_router.py +++ b/cloud/openstack/os_router.py @@ -335,7 +335,9 @@ def main(): changed = True - module.exit_json(changed=changed, router=router) + module.exit_json(changed=changed, + router=router, + id=router['id']) elif state == 'absent': if not router: From 4072bc1da0010750d7d7dee32a9bd00d5222cc6e Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 23 Oct 2015 18:59:05 -0400 Subject: [PATCH 032/200] rearranged systemd check, removed redundant systemctl check fixed unused cmd and state var assignements --- system/service.py | 31 ++++++++++++++++++++++++++----- 1 file changed, 26 insertions(+), 5 deletions(-) diff --git a/system/service.py b/system/service.py index d08b6cd6746..2b8dbb8696c 100644 --- a/system/service.py +++ b/system/service.py @@ -395,7 +395,7 @@ class LinuxService(Service): location = dict() for binary in binaries: - location[binary] = self.module.get_bin_path(binary) + location[binary] = self.module.get_bin_path(binary, opt_dirs=paths) for initdir in initpaths: initscript = "%s/%s" % (initdir,self.name) @@ -403,10 +403,31 @@ class LinuxService(Service): self.svc_initscript = initscript def check_systemd(): - return os.path.exists("/run/systemd/system/") or os.path.exists("/dev/.run/systemd/") or os.path.exists("/dev/.systemd/") + + # tools must be installed + if location.get('systemctl',False): + + # this should show if systemd is the boot init system + # these mirror systemd's own sd_boot test http://www.freedesktop.org/software/systemd/man/sd_booted.html + for canary in ["/run/systemd/system/", "/dev/.run/systemd/", "/dev/.systemd/"]: + if os.path.exists(canary): + return True + + # If all else fails, check if init is the systemd command, using comm as cmdline could be symlink + try: + f = open('/proc/1/comm', 'r') + except 
IOError: + # If comm doesn't exist, old kernel, no systemd + return False + + for line in f: + if 'systemd' in line: + return True + + return False # Locate a tool to enable/disable a service - if location.get('systemctl',False) and check_systemd(): + if check_systemd(): # service is managed by systemd self.__systemd_unit = self.name self.svc_cmd = location['systemctl'] @@ -684,7 +705,8 @@ class LinuxService(Service): (rc, out, err) = self.execute_command("%s --list %s" % (self.enable_cmd, self.name)) if not self.name in out: self.module.fail_json(msg="service %s does not support chkconfig" % self.name) - state = out.split()[-1] + #TODO: look back on why this is here + #state = out.split()[-1] # Check if we're already in the correct state if "3:%s" % action in out and "5:%s" % action in out: @@ -946,7 +968,6 @@ class FreeBsdService(Service): self.rcconf_file = rcfile rc, stdout, stderr = self.execute_command("%s %s %s %s" % (self.svc_cmd, self.name, 'rcvar', self.arguments)) - cmd = "%s %s %s %s" % (self.svc_cmd, self.name, 'rcvar', self.arguments) try: rcvars = shlex.split(stdout, comments=True) except: From 06f301b05b384c0a8e81b92c5c2333b2e66e2767 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 26 Oct 2015 08:36:30 -0700 Subject: [PATCH 033/200] Note the difference between yum package groups and environment groups. Fixes https://github.com/ansible/ansible/issues/12873 --- packaging/os/yum.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/packaging/os/yum.py b/packaging/os/yum.py index bcf9b283a95..1c8f0c2e127 100644 --- a/packaging/os/yum.py +++ b/packaging/os/yum.py @@ -130,6 +130,15 @@ notes: that the other packages come from (such as epel-release) then that package needs to be installed in a separate task. This mimics yum's command line behaviour. + - 'Yum itself has two types of groups. "Package groups" are specified in the + rpm itself while "environment groups" are specified in a separate file + (usually by the distribution). 
Unfortunately, this division becomes + apparent to ansible users because ansible needs to operate on the group + of packages in a single transaction and yum requires groups to be specified + in different ways when used in that way. Package groups are specified as + "@development-tools" and environment groups are "@^gnome-desktop-environment'. + Use the "yum group list" command to see which category of group the group + you want to install falls into.' # informational: requirements for nodes requirements: [ yum ] author: @@ -161,6 +170,9 @@ EXAMPLES = ''' - name: install the 'Development tools' package group yum: name="@Development tools" state=present + +- name: install the 'Gnome desktop' environment group + yum: name="@^gnome-desktop-environment" state=present ''' # 64k. Number of bytes to read at a time when manually downloading pkgs via a url From 2a93f218216a740d705552d62edc898a276ef507 Mon Sep 17 00:00:00 2001 From: Patrick Galbraith Date: Mon, 26 Oct 2015 13:28:10 -0400 Subject: [PATCH 034/200] Fix to issue 12912. Supply 'force' to install of python-apt. 
--- packaging/os/apt.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/os/apt.py b/packaging/os/apt.py index 1fd770f710e..d99eb85ff7e 100755 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -559,7 +559,7 @@ def main(): if not HAS_PYTHON_APT: try: - module.run_command('apt-get update && apt-get install python-apt -y -q', use_unsafe_shell=True, check_rc=True) + module.run_command('apt-get update && apt-get install python-apt -y -q --force-yes', use_unsafe_shell=True, check_rc=True) global apt, apt_pkg import apt import apt.debfile From 3993f4e9674ad3d325aed1c3ca43f5e2f81c9b9c Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 26 Oct 2015 13:01:01 -0700 Subject: [PATCH 035/200] Simplify logic to handle options set to empty string Fixes #2125 --- files/ini_file.py | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/files/ini_file.py b/files/ini_file.py index fff153af6ad..cb3edb2cff2 100644 --- a/files/ini_file.py +++ b/files/ini_file.py @@ -65,6 +65,12 @@ options: description: - all arguments accepted by the M(file) module also work here required: false + state: + description: + - If set to C(absent) the option or section will be removed if present instead of created. + required: false + default: "present" + choices: [ "present", "absent" ] notes: - While it is possible to add an I(option) without specifying a I(value), this makes no sense. 
@@ -110,21 +116,14 @@ def do_ini(module, filename, section=None, option=None, value=None, state='prese if state == 'absent': - if option is None and value is None: - if cp.has_section(section): - cp.remove_section(section) - changed = True + if option is None: + changed = cp.remove_section(section) else: - if option is not None: - try: - if cp.get(section, option): - cp.remove_option(section, option) - changed = True - except ConfigParser.InterpolationError: - cp.remove_option(section, option) - changed = True - except: - pass + try: + changed = cp.remove_option(section, option) + except ConfigParser.NoSectionError: + # Option isn't present if the section isn't either + pass if state == 'present': @@ -212,4 +211,5 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() From e101657722fa6457a97141858449b870e269f6af Mon Sep 17 00:00:00 2001 From: Jonathan Mainguy Date: Tue, 27 Oct 2015 13:03:51 -0400 Subject: [PATCH 036/200] Update error message to be more explicit --- database/mysql/mysql_variables.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/database/mysql/mysql_variables.py b/database/mysql/mysql_variables.py index d7187e85733..ab4848d6938 100644 --- a/database/mysql/mysql_variables.py +++ b/database/mysql/mysql_variables.py @@ -244,7 +244,8 @@ def main(): db_connection = MySQLdb.connect(host=module.params["login_host"], port=module.params["login_port"], user=login_user, passwd=login_password, db="mysql") cursor = db_connection.cursor() except Exception, e: - module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or ~/.my.cnf has the credentials") + errno, errstr = e.args + module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or ~/.my.cnf has the credentials (ERROR: %s %s)" % (errno, errstr)) mysqlvar_val = getvariable(cursor, mysqlvar) if mysqlvar_val is None: 
module.fail_json(msg="Variable not available \"%s\"" % mysqlvar, changed=False) From 51db236aa7556f68d442386a6ea8f2938dcfc5a6 Mon Sep 17 00:00:00 2001 From: Jonathan Mainguy Date: Tue, 27 Oct 2015 13:17:24 -0400 Subject: [PATCH 037/200] Update doc to reflect password is required if adding a new user --- database/mysql/mysql_user.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/database/mysql/mysql_user.py b/database/mysql/mysql_user.py index 1ea54b41b3a..3ac7c0890cd 100644 --- a/database/mysql/mysql_user.py +++ b/database/mysql/mysql_user.py @@ -32,7 +32,7 @@ options: required: true password: description: - - set the user's password + - set the user's password. (Required when adding a user) required: false default: null host: From eeaeeb5a1ffe81e61197791a4ab3b5e2ac2d1f07 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 27 Oct 2015 12:51:48 -0700 Subject: [PATCH 038/200] Correct typo in yum module docs --- packaging/os/yum.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/os/yum.py b/packaging/os/yum.py index 1c8f0c2e127..e1e3341a075 100644 --- a/packaging/os/yum.py +++ b/packaging/os/yum.py @@ -136,7 +136,7 @@ notes: apparent to ansible users because ansible needs to operate on the group of packages in a single transaction and yum requires groups to be specified in different ways when used in that way. Package groups are specified as - "@development-tools" and environment groups are "@^gnome-desktop-environment'. + "@development-tools" and environment groups are "@^gnome-desktop-environment". Use the "yum group list" command to see which category of group the group you want to install falls into.' 
# informational: requirements for nodes From 45a9f0b4536b30dd8e796e7c02ba0510fc3ca008 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 28 Oct 2015 09:31:18 -0400 Subject: [PATCH 039/200] since find doesn't make changes, support check mode and gather data for other tasks in check mode --- files/find.py | 1 + 1 file changed, 1 insertion(+) diff --git a/files/find.py b/files/find.py index 04ecddfe607..d6d1d42c4fd 100644 --- a/files/find.py +++ b/files/find.py @@ -267,6 +267,7 @@ def main(): get_checksum = dict(default="False", type='bool'), use_regex = dict(default="False", type='bool'), ), + supports_check_mode=True, ) params = module.params From 22c2789b72c6ed8fa0735fd0aef81858372e1b8e Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 28 Oct 2015 08:50:47 -0700 Subject: [PATCH 040/200] Document and return an error if httplib2 >= 0.7 is not present. We can't use httplib2 0.6.x and below because they do not verify TLS certificates and thus are insecure. Fixes #1875 --- network/basics/uri.py | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/network/basics/uri.py b/network/basics/uri.py index 1b3ace2eccd..5c0907523b8 100644 --- a/network/basics/uri.py +++ b/network/basics/uri.py @@ -25,6 +25,8 @@ import shutil import tempfile import base64 import datetime +from distutils.version import LooseVersion + try: import json except ImportError: @@ -143,7 +145,8 @@ options: version_added: '1.9.2' # informational: requirements for nodes -requirements: [ urlparse, httplib2 ] +requirements: + - httplib2 >= 0.7.0 author: "Romeo Theriault (@romeotheriault)" ''' @@ -198,11 +201,15 @@ EXAMPLES = ''' ''' -HAS_HTTPLIB2 = True +HAS_HTTPLIB2 = False + try: import httplib2 -except ImportError: - HAS_HTTPLIB2 = False + if LooseVersion(httplib2.__version__) >= LooseVersion('0.7'): + HAS_HTTPLIB2 = True +except ImportError, AttributeError: + # AttributeError if __version__ is not present + pass HAS_URLPARSE = True @@ -382,7 +389,7 @@ def main(): ) 
if not HAS_HTTPLIB2: - module.fail_json(msg="httplib2 is not installed") + module.fail_json(msg="httplib2 >= 0.7 is not installed") if not HAS_URLPARSE: module.fail_json(msg="urlparse is not installed") From 1766c5082418d31d3c9444abc5df6a36937f3ed2 Mon Sep 17 00:00:00 2001 From: Jonathan Mainguy Date: Tue, 27 Oct 2015 12:09:01 -0400 Subject: [PATCH 041/200] Update documentation to reflect need for mysql client --- database/mysql/mysql_db.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/database/mysql/mysql_db.py b/database/mysql/mysql_db.py index 33720f5d4f6..9761271f058 100644 --- a/database/mysql/mysql_db.py +++ b/database/mysql/mysql_db.py @@ -85,12 +85,15 @@ notes: - Requires the MySQLdb Python package on the remote host. For Ubuntu, this is as easy as apt-get install python-mysqldb. (See M(apt).) For CentOS/Fedora, this is as easy as yum install MySQL-python. (See M(yum).) + - Requires the mysql command line client. For Centos/Fedora, this is as easy as + yum install mariadb (See M(yum).). For Debian/Ubuntu this is as easy as + apt-get install mariadb-client. (See M(apt).) - Both I(login_password) and I(login_user) are required when you are passing credentials. If none are present, the module will attempt to read the credentials from C(~/.my.cnf), and finally fall back to using the MySQL default login of C(root) with no password. requirements: [ ConfigParser ] -author: "Mark Theunissen (@marktheunissen)" +author: "Ansible Core Team" ''' EXAMPLES = ''' From 43cecd3ceede3102306d56a05d0f93a25917f3ff Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 27 Oct 2015 17:26:51 -0700 Subject: [PATCH 042/200] Use select in wait_for so that we don't get stuck in cornercases: * reading from a socket that gave some data we weren't looking for and then closed. * read from a socket that stays open and never sends data. * reading from a socket that sends data but not the data we're looking for. 
Fixes #2051 --- utilities/logic/wait_for.py | 100 ++++++++++++++++++++++-------------- 1 file changed, 62 insertions(+), 38 deletions(-) diff --git a/utilities/logic/wait_for.py b/utilities/logic/wait_for.py index 295155f3028..1287d9b6057 100644 --- a/utilities/logic/wait_for.py +++ b/utilities/logic/wait_for.py @@ -18,12 +18,14 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -import socket +import binascii import datetime -import time -import sys +import math import re -import binascii +import select +import socket +import sys +import time HAS_PSUTIL = False try: @@ -349,6 +351,10 @@ def main(): state = params['state'] path = params['path'] search_regex = params['search_regex'] + if search_regex is not None: + compiled_search_re = re.compile(search_regex, re.MULTILINE) + else: + compiled_search_re = None if port and path: module.fail_json(msg="port and path parameter can not both be passed to wait_for") @@ -404,55 +410,72 @@ def main(): if path: try: os.stat(path) - if search_regex: - try: - f = open(path) - try: - if re.search(search_regex, f.read(), re.MULTILINE): - break - else: - time.sleep(1) - finally: - f.close() - except IOError: - time.sleep(1) - pass - else: - break except OSError, e: - # File not present - if e.errno == 2: - time.sleep(1) - else: + # If anything except file not present, throw an error + if e.errno != 2: elapsed = datetime.datetime.now() - start module.fail_json(msg="Failed to stat %s, %s" % (path, e.strerror), elapsed=elapsed.seconds) + # file doesn't exist yet, so continue + else: + # File exists. Are there additional things to check? + if not compiled_search_re: + # nope, succeed! + break + try: + f = open(path) + try: + if re.search(compiled_search_re, f.read()): + # String found, success! 
+ break + finally: + f.close() + except IOError: + pass elif port: + alt_connect_timeout = math.ceil((end - datetime.datetime.now()).total_seconds()) try: - s = _create_connection( (host, port), connect_timeout) - if search_regex: + s = _create_connection((host, port), min(connect_timeout, alt_connect_timeout)) + except: + # Failed to connect by connect_timeout. wait and try again + pass + else: + # Connected -- are there additional conditions? + if compiled_search_re: data = '' matched = False - while 1: - data += s.recv(1024) - if not data: + while datetime.datetime.now() < end: + max_timeout = math.ceil((end - datetime.datetime.now()).total_seconds()) + (readable, w, e) = select.select([s], [], [], max_timeout) + if not readable: + # No new data. Probably means our timeout + # expired + continue + response = s.recv(1024) + if not response: + # Server shutdown break - elif re.search(search_regex, data, re.MULTILINE): + data += response + if re.search(compiled_search_re, data): matched = True break + + # Shutdown the client socket + s.shutdown(socket.SHUT_RDWR) + s.close() if matched: - s.shutdown(socket.SHUT_RDWR) - s.close() + # Found our string, success! break else: + # Connection established, success! s.shutdown(socket.SHUT_RDWR) s.close() break - except: - time.sleep(1) - pass - else: - time.sleep(1) - else: + + # Conditions not yet met, wait and try again + time.sleep(1) + + else: # while-else + # Timeout expired elapsed = datetime.datetime.now() - start if port: if search_regex: @@ -485,4 +508,5 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() From e70002d2f9ec4c2958573463c42bfe8c332bb72b Mon Sep 17 00:00:00 2001 From: Lars Kellogg-Stedman Date: Wed, 28 Oct 2015 13:16:25 -0400 Subject: [PATCH 043/200] os_server_facts returns facts about multiple servers have `os_server_facts` call `list_servers` rather than `get_server`, and treat the `server` parameter as a wildcard pattern. 
This permits one to get facts on a single server: - os_server: server: webserver1 On mutiple servers: - os_server: server: webserver* Or on all servers: - os_server: Introduces a `detailed` parameter to request additional server details at the cost of additional API calls. --- cloud/openstack/os_server_facts.py | 48 ++++++++++++++++++++---------- 1 file changed, 33 insertions(+), 15 deletions(-) diff --git a/cloud/openstack/os_server_facts.py b/cloud/openstack/os_server_facts.py index 5d61e4c18d3..bd694dbc558 100644 --- a/cloud/openstack/os_server_facts.py +++ b/cloud/openstack/os_server_facts.py @@ -15,6 +15,8 @@ # You should have received a copy of the GNU General Public License # along with this software. If not, see . +import fnmatch + try: import shade from shade import meta @@ -25,36 +27,47 @@ except ImportError: DOCUMENTATION = ''' --- module: os_server_facts -short_description: Retrieve facts about a compute instance +short_description: Retrieve facts about one or more compute instances version_added: "2.0" -author: "Monty Taylor (@emonty)" description: - - Retrieve facts about a server instance from OpenStack. + - Retrieve facts about server instances from OpenStack. notes: - - Facts are placed in the C(openstack) variable. + - This module creates a new top-level C(openstack_servers) fact, which + contains a list of servers. requirements: - "python >= 2.6" - "shade" options: server: description: - - Name or ID of the instance - required: true + - restrict results to servers with names matching + this glob expression (e.g., C). + required: false + default: None + detailed: + description: + - when true, return additional detail about servers at the expense + of additional API calls. 
+ required: false + default: false extends_documentation_fragment: openstack ''' EXAMPLES = ''' -# Gather facts about a previously created server named vm1 +# Gather facts about all servers named C: - os_server_facts: cloud: rax-dfw - server: vm1 -- debug: var=openstack + server: web* +- debug: + var: openstack_servers ''' + def main(): argument_spec = openstack_full_argument_spec( - server=dict(required=True), + server=dict(required=False), + detailed=dict(required=False, type='bool'), ) module_kwargs = openstack_module_kwargs() module = AnsibleModule(argument_spec, **module_kwargs) @@ -64,10 +77,16 @@ def main(): try: cloud = shade.openstack_cloud(**module.params) - server = cloud.get_server(module.params['server']) - hostvars = dict(openstack=meta.get_hostvars_from_server( - cloud, server)) - module.exit_json(changed=False, ansible_facts=hostvars) + openstack_servers = cloud.list_servers( + detailed=module.params['detailed']) + + if module.params['server']: + # filter servers by name + pattern = module.params['server'] + openstack_servers = [server for server in openstack_servers + if fnmatch.fnmatch(server['name'], pattern)] + module.exit_json(changed=False, ansible_facts=dict( + openstack_servers=openstack_servers)) except shade.OpenStackCloudException as e: module.fail_json(msg=e.message) @@ -77,4 +96,3 @@ from ansible.module_utils.basic import * from ansible.module_utils.openstack import * if __name__ == '__main__': main() - From 57dcf2c9dd35fb535d77ca4fa30e9fed4be32472 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 28 Oct 2015 14:38:11 -0400 Subject: [PATCH 044/200] @lorin stepping down as maintainer --- cloud/amazon/_ec2_ami_search.py | 1 - cloud/amazon/ec2_eip.py | 1 - cloud/openstack/_keystone_user.py | 2 +- database/postgresql/postgresql_db.py | 2 +- database/postgresql/postgresql_user.py | 2 +- web_infrastructure/htpasswd.py | 2 +- 6 files changed, 4 insertions(+), 6 deletions(-) diff --git a/cloud/amazon/_ec2_ami_search.py 
b/cloud/amazon/_ec2_ami_search.py index 8ef0c0046ea..5add0260d80 100644 --- a/cloud/amazon/_ec2_ami_search.py +++ b/cloud/amazon/_ec2_ami_search.py @@ -66,7 +66,6 @@ options: default: paravirtual choices: ["paravirtual", "hvm"] -author: Lorin Hochstein ''' EXAMPLES = ''' diff --git a/cloud/amazon/ec2_eip.py b/cloud/amazon/ec2_eip.py index 020ec67a497..69d762c8c08 100644 --- a/cloud/amazon/ec2_eip.py +++ b/cloud/amazon/ec2_eip.py @@ -61,7 +61,6 @@ options: extends_documentation_fragment: - aws - ec2 -author: "Lorin Hochstein (@lorin) " author: "Rick Mendes (@rickmendes) " notes: - This module will return C(public_ip) on success, which will contain the diff --git a/cloud/openstack/_keystone_user.py b/cloud/openstack/_keystone_user.py index 48cc87b241a..9586b8b70a9 100644 --- a/cloud/openstack/_keystone_user.py +++ b/cloud/openstack/_keystone_user.py @@ -90,7 +90,7 @@ options: requirements: - "python >= 2.6" - python-keystoneclient -author: "Lorin Hochstein (@lorin)" +author: "Ansible Core Team (deprecated)" ''' EXAMPLES = ''' diff --git a/database/postgresql/postgresql_db.py b/database/postgresql/postgresql_db.py index 469d68fa0fa..762cb65e922 100644 --- a/database/postgresql/postgresql_db.py +++ b/database/postgresql/postgresql_db.py @@ -95,7 +95,7 @@ notes: - This module uses I(psycopg2), a Python PostgreSQL database adapter. You must ensure that psycopg2 is installed on the host before using this module. If the remote host is the PostgreSQL server (which is the default case), then PostgreSQL must also be installed on the remote host. For Ubuntu-based systems, install the C(postgresql), C(libpq-dev), and C(python-psycopg2) packages on the remote host before using this module. 
requirements: [ psycopg2 ] -author: "Lorin Hochstein (@lorin)" +author: "Ansible Core Team" ''' EXAMPLES = ''' diff --git a/database/postgresql/postgresql_user.py b/database/postgresql/postgresql_user.py index cee5a9ae131..4f2174330f6 100644 --- a/database/postgresql/postgresql_user.py +++ b/database/postgresql/postgresql_user.py @@ -137,7 +137,7 @@ notes: to all users. You may not specify password or role_attr_flags when the PUBLIC user is specified. requirements: [ psycopg2 ] -author: "Lorin Hochstein (@lorin)" +author: "Ansible Core Team" ''' EXAMPLES = ''' diff --git a/web_infrastructure/htpasswd.py b/web_infrastructure/htpasswd.py index 361a131ef2d..4253f1572ac 100644 --- a/web_infrastructure/htpasswd.py +++ b/web_infrastructure/htpasswd.py @@ -69,7 +69,7 @@ notes: - "On Debian, Ubuntu, or Fedora: install I(python-passlib)." - "On RHEL or CentOS: Enable EPEL, then install I(python-passlib)." requires: [ passlib>=1.6 ] -author: "Lorin Hochstein (@lorin)" +author: "Ansible Core Team" """ EXAMPLES = """ From a76184ad1f9bda1bcbbd334864a55c2871c10343 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 28 Oct 2015 14:39:20 -0400 Subject: [PATCH 045/200] @bradobro stepping down as maintainer --- system/authorized_key.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/authorized_key.py b/system/authorized_key.py index 361e68cb009..8a97722b222 100644 --- a/system/authorized_key.py +++ b/system/authorized_key.py @@ -82,7 +82,7 @@ options: version_added: "1.9" description: - "Adds or removes authorized keys for particular user accounts" -author: "Brad Olson (@bradobro)" +author: "Ansible Core Team" ''' EXAMPLES = ''' From 7cb9289197c906162457ac2fdcd6f2f8d0ef1d34 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 28 Oct 2015 14:40:54 -0400 Subject: [PATCH 046/200] Clarity of owner, even though module is deprecated --- cloud/amazon/_ec2_ami_search.py | 1 + 1 file changed, 1 insertion(+) diff --git 
a/cloud/amazon/_ec2_ami_search.py b/cloud/amazon/_ec2_ami_search.py index 5add0260d80..a85bdf00223 100644 --- a/cloud/amazon/_ec2_ami_search.py +++ b/cloud/amazon/_ec2_ami_search.py @@ -66,6 +66,7 @@ options: default: paravirtual choices: ["paravirtual", "hvm"] +author: "Ansible Core Team (deprecated)" ''' EXAMPLES = ''' From 344cf5fc0e2c8637fe9513206b2c843ca60264cf Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 28 Oct 2015 14:47:03 -0400 Subject: [PATCH 047/200] Remove @ralph-tice from maintainership per his request --- cloud/amazon/s3.py | 1 - 1 file changed, 1 deletion(-) diff --git a/cloud/amazon/s3.py b/cloud/amazon/s3.py index fdeaafd58bd..ada5cd51c84 100644 --- a/cloud/amazon/s3.py +++ b/cloud/amazon/s3.py @@ -146,7 +146,6 @@ options: requirements: [ "boto" ] author: - "Lester Wade (@lwade)" - - "Ralph Tice (@ralph-tice)" extends_documentation_fragment: aws ''' From 2dd7ac7a4569039087a4d364977457d7daf69aa4 Mon Sep 17 00:00:00 2001 From: "wtanaka.com" Date: Wed, 28 Oct 2015 10:35:51 -1000 Subject: [PATCH 048/200] Update target parameter documentation xz support was only added recently (2015-05-15), so referring to it unqualified in the online documentation is confusing. --- database/mysql/mysql_db.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/database/mysql/mysql_db.py b/database/mysql/mysql_db.py index 33720f5d4f6..22ae4157b4b 100644 --- a/database/mysql/mysql_db.py +++ b/database/mysql/mysql_db.py @@ -79,7 +79,7 @@ options: target: description: - Location, on the remote host, of the dump file to read from or write to. Uncompressed SQL - files (C(.sql)) as well as bzip2 (C(.bz2)), gzip (C(.gz)) and xz compressed files are supported. + files (C(.sql)) as well as bzip2 (C(.bz2)), gzip (C(.gz)) and xz (Added in 2.0) compressed files are supported. required: false notes: - Requires the MySQLdb Python package on the remote host. 
For Ubuntu, this From 7f59773460d79b3dae34c375ba68caea1bfc09a8 Mon Sep 17 00:00:00 2001 From: Ales Nosek Date: Wed, 28 Oct 2015 22:04:32 -0700 Subject: [PATCH 049/200] ini_file should only change what was specified and nothing more #5860 See also: http://alesnosek.com/blog/2015/08/03/improving-ansibles-ini-file-module/ --- files/ini_file.py | 127 +++++++++++++++++++++++----------------------- 1 file changed, 63 insertions(+), 64 deletions(-) diff --git a/files/ini_file.py b/files/ini_file.py index cb3edb2cff2..5d6df779cf0 100644 --- a/files/ini_file.py +++ b/files/ini_file.py @@ -2,6 +2,7 @@ # -*- coding: utf-8 -*- # (c) 2012, Jan-Piet Mens +# (c) 2015, Ales Nosek # # This file is part of Ansible # @@ -28,7 +29,7 @@ description: - Manage (add, remove, change) individual settings in an INI-style file without having to manage the file as a whole with, say, M(template) or M(assemble). Adds missing sections if they don't exist. - - Comments are discarded when the source file is read, and therefore will not + - Comments are discarded when the source file is read, and therefore will not show up in the destination file. version_added: "0.9" options: @@ -79,7 +80,7 @@ notes: Either use M(template) to create a base INI file with a C([default]) section, or use M(lineinfile) to add the missing line. 
requirements: [ ConfigParser ] -author: "Jan-Piet Mens (@jpmens)" +author: "Jan-Piet Mens (@jpmens), Ales Nosek" ''' EXAMPLES = ''' @@ -101,79 +102,77 @@ import sys def do_ini(module, filename, section=None, option=None, value=None, state='present', backup=False): - changed = False - if (sys.version_info[0] == 2 and sys.version_info[1] >= 7) or sys.version_info[0] >= 3: - cp = ConfigParser.ConfigParser(allow_no_value=True) - else: - cp = ConfigParser.ConfigParser() - cp.optionxform = identity - - try: - f = open(filename) - cp.readfp(f) - except IOError: - pass - - - if state == 'absent': - if option is None: - changed = cp.remove_section(section) - else: - try: - changed = cp.remove_option(section, option) - except ConfigParser.NoSectionError: - # Option isn't present if the section isn't either - pass - - if state == 'present': - # DEFAULT section is always there by DEFAULT, so never try to add it. - if not cp.has_section(section) and section.upper() != 'DEFAULT': + with open(filename, 'r') as ini_file: + ini_lines = ini_file.readlines() + # append a fake section line to simplify the logic + ini_lines.append('[') - cp.add_section(section) - changed = True + within_section = not section + section_start = 0 + changed = False - if option is not None and value is not None: - try: - oldvalue = cp.get(section, option) - if str(value) != str(oldvalue): - cp.set(section, option, value) + for index, line in enumerate(ini_lines): + if line.startswith('[%s]' % section): + within_section = True + section_start = index + elif line.startswith('['): + if within_section: + if state == 'present': + # insert missing option line at the end of the section + ini_lines.insert(index, '%s = %s\n' % (option, value)) + changed = True + elif state == 'absent' and not option: + # remove the entire section + del ini_lines[section_start:index] changed = True - except ConfigParser.NoSectionError: - cp.set(section, option, value) - changed = True - except ConfigParser.NoOptionError: - 
cp.set(section, option, value) - changed = True - except ConfigParser.InterpolationError: - cp.set(section, option, value) - changed = True + break + else: + if within_section and option: + if state == 'present': + # change the existing option line + if re.match('%s *=' % option, line) \ + or re.match('# *%s *=' % option, line) \ + or re.match('; *%s *=' % option, line): + newline = '%s = %s\n' % (option, value) + changed = ini_lines[index] != newline + ini_lines[index] = newline + if changed: + # remove all possible option occurences from the rest of the section + index = index + 1 + while index < len(ini_lines): + line = ini_lines[index] + if line.startswith('['): + break + if re.match('%s *=' % option, line): + del ini_lines[index] + else: + index = index + 1 + break + else: + # comment out the existing option line + if re.match('%s *=' % option, line): + ini_lines[index] = '#%s' % ini_lines[index] + changed = True + break + + # remove the fake section line + del ini_lines[-1:] + + if not within_section and option and state == 'present': + ini_lines.append('[%s]\n' % section) + ini_lines.append('%s = %s\n' % (option, value)) + changed = True + if changed and not module.check_mode: if backup: module.backup_local(filename) - - try: - f = open(filename, 'w') - cp.write(f) - except: - module.fail_json(msg="Can't create %s" % filename) + with open(filename, 'w') as ini_file: + ini_file.writelines(ini_lines) return changed -# ============================================================== -# identity - -def identity(arg): - """ - This function simply returns its argument. It serves as a - replacement for ConfigParser.optionxform, which by default - changes arguments to lower case. The identity function is a - better choice than str() or unicode(), because it is - encoding-agnostic. 
- """ - return arg - # ============================================================== # main From 22790d301a41df78eb756c8e0da9526984c68475 Mon Sep 17 00:00:00 2001 From: Ales Nosek Date: Fri, 30 Oct 2015 21:57:25 -0700 Subject: [PATCH 050/200] Make the syntax work with Python 2.4 --- files/ini_file.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/files/ini_file.py b/files/ini_file.py index 5d6df779cf0..d837c329d4b 100644 --- a/files/ini_file.py +++ b/files/ini_file.py @@ -103,10 +103,13 @@ import sys def do_ini(module, filename, section=None, option=None, value=None, state='present', backup=False): - with open(filename, 'r') as ini_file: + ini_file = open(filename, 'r') + try: ini_lines = ini_file.readlines() # append a fake section line to simplify the logic ini_lines.append('[') + finally: + ini_file.close() within_section = not section section_start = 0 @@ -168,8 +171,11 @@ def do_ini(module, filename, section=None, option=None, value=None, state='prese if changed and not module.check_mode: if backup: module.backup_local(filename) - with open(filename, 'w') as ini_file: + ini_file = open(filename, 'w') + try: ini_file.writelines(ini_lines) + finally: + ini_file.close() return changed From 89957eed537b08001ea171ba6a4ead41d31ab983 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 31 Oct 2015 14:24:32 -0400 Subject: [PATCH 051/200] document mysql collation can only be set during creation --- database/mysql/mysql_db.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/database/mysql/mysql_db.py b/database/mysql/mysql_db.py index 33720f5d4f6..8927a1bc652 100644 --- a/database/mysql/mysql_db.py +++ b/database/mysql/mysql_db.py @@ -68,7 +68,7 @@ options: choices: [ "present", "absent", "dump", "import" ] collation: description: - - Collation mode + - Collation mode (sorting). This only applies to new table/databases and does not update existing ones, this is a limitation of MySQL. 
required: false default: null encoding: From d192e2c3e32575713d94a8f7fd19c4d9980a0e90 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 31 Oct 2015 21:35:48 -0400 Subject: [PATCH 052/200] code cleanup and reoorg, renamed vars and functions to actual purpose reneabled logging of steps --- utilities/logic/async_wrapper.py | 214 ++++++++++++++++--------------- 1 file changed, 108 insertions(+), 106 deletions(-) diff --git a/utilities/logic/async_wrapper.py b/utilities/logic/async_wrapper.py index 2bc2dc21823..55f5283ed79 100644 --- a/utilities/logic/async_wrapper.py +++ b/utilities/logic/async_wrapper.py @@ -27,15 +27,20 @@ import shlex import os import subprocess import sys -import datetime import traceback import signal import time import syslog + +syslog.openlog('ansible-%s' % os.path.basename(__file__)) +syslog.syslog(syslog.LOG_NOTICE, 'Invoked with %s' % " ".join(sys.argv[1:])) + +def notice(msg): + syslog.syslog(syslog.LOG_NOTICE, msg) + def daemonize_self(): # daemonizing code: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66012 - # logger.info("cobblerd started") try: pid = os.fork() if pid > 0: @@ -65,50 +70,21 @@ def daemonize_self(): os.dup2(dev_null.fileno(), sys.stdout.fileno()) os.dup2(dev_null.fileno(), sys.stderr.fileno()) -if len(sys.argv) < 3: - print json.dumps({ - "failed" : True, - "msg" : "usage: async_wrapper . Humans, do not call directly!" 
- }) - sys.exit(1) - -jid = "%s.%d" % (sys.argv[1], os.getpid()) -time_limit = sys.argv[2] -wrapped_module = sys.argv[3] -argsfile = sys.argv[4] -cmd = "%s %s" % (wrapped_module, argsfile) -syslog.openlog('ansible-%s' % os.path.basename(__file__)) -syslog.syslog(syslog.LOG_NOTICE, 'Invoked with %s' % " ".join(sys.argv[1:])) - -# setup logging directory -logdir = os.path.expanduser("~/.ansible_async") -log_path = os.path.join(logdir, jid) - -if not os.path.exists(logdir): - try: - os.makedirs(logdir) - except: - print json.dumps({ - "failed" : 1, - "msg" : "could not create: %s" % logdir - }) +def _run_module(wrapped_cmd, jid, job_path): -def _run_command(wrapped_cmd, jid, log_path): - - logfile = open(log_path, "w") - logfile.write(json.dumps({ "started" : 1, "ansible_job_id" : jid })) - logfile.close() - logfile = open(log_path, "w") + jobfile = open(job_path, "w") + jobfile.write(json.dumps({ "started" : 1, "ansible_job_id" : jid })) + jobfile.close() + jobfile = open(job_path, "w") result = {} outdata = '' try: cmd = shlex.split(wrapped_cmd) - script = subprocess.Popen(cmd, shell=False, - stdin=None, stdout=logfile, stderr=logfile) + script = subprocess.Popen(cmd, shell=False, stdin=None, stdout=jobfile, stderr=jobfile) script.communicate() - outdata = file(log_path).read() + outdata = file(job_path).read() result = json.loads(outdata) except (OSError, IOError), e: @@ -118,83 +94,109 @@ def _run_command(wrapped_cmd, jid, log_path): "msg": str(e), } result['ansible_job_id'] = jid - logfile.write(json.dumps(result)) + jobfile.write(json.dumps(result)) except: result = { "failed" : 1, "cmd" : wrapped_cmd, - "data" : outdata, # temporary debug only + "data" : outdata, # temporary notice only "msg" : traceback.format_exc() } result['ansible_job_id'] = jid - logfile.write(json.dumps(result)) - logfile.close() + jobfile.write(json.dumps(result)) + jobfile.close() -# immediately exit this process, leaving an orphaned process -# running which immediately forks a 
supervisory timing process -#import logging -#import logging.handlers +#################### +## main ## +#################### +if __name__ == '__main__': -#logger = logging.getLogger("ansible_async") -#logger.setLevel(logging.WARNING) -#logger.addHandler( logging.handlers.SysLogHandler("/dev/log") ) -def debug(msg): - #logger.warning(msg) - pass + if len(sys.argv) < 3: + print json.dumps({ + "failed" : True, + "msg" : "usage: async_wrapper . Humans, do not call directly!" + }) + sys.exit(1) -try: - pid = os.fork() - if pid: - # Notify the overlord that the async process started - - # we need to not return immmediately such that the launched command has an attempt - # to initialize PRIOR to ansible trying to clean up the launch directory (and argsfile) - # this probably could be done with some IPC later. Modules should always read - # the argsfile at the very first start of their execution anyway - time.sleep(1) - debug("Return async_wrapper task started.") - print json.dumps({ "started" : 1, "ansible_job_id" : jid, "results_file" : log_path }) - sys.stdout.flush() - sys.exit(0) - else: - # The actual wrapper process - - # Daemonize, so we keep on running - daemonize_self() - - # we are now daemonized, create a supervisory process - debug("Starting module and watcher") - - sub_pid = os.fork() - if sub_pid: - # the parent stops the process after the time limit - remaining = int(time_limit) - - # set the child process group id to kill all children - os.setpgid(sub_pid, sub_pid) - - debug("Start watching %s (%s)"%(sub_pid, remaining)) - time.sleep(5) - while os.waitpid(sub_pid, os.WNOHANG) == (0, 0): - debug("%s still running (%s)"%(sub_pid, remaining)) - time.sleep(5) - remaining = remaining - 5 - if remaining <= 0: - debug("Now killing %s"%(sub_pid)) - os.killpg(sub_pid, signal.SIGKILL) - debug("Sent kill to group %s"%sub_pid) - time.sleep(1) - sys.exit(0) - debug("Done in kid B.") - os._exit(0) - else: - # the child process runs the actual module - debug("Start 
module (%s)"%os.getpid()) - _run_command(cmd, jid, log_path) - debug("Module complete (%s)"%os.getpid()) - sys.exit(0) + jid = "%s.%d" % (sys.argv[1], os.getpid()) + time_limit = sys.argv[2] + wrapped_module = sys.argv[3] + argsfile = sys.argv[4] + cmd = "%s %s" % (wrapped_module, argsfile) + step = 5 + + # setup job output directory + jobdir = os.path.expanduser("~/.ansible_async") + job_path = os.path.join(jobdir, jid) + + if not os.path.exists(jobdir): + try: + os.makedirs(jobdir) + except: + print json.dumps({ + "failed" : 1, + "msg" : "could not create: %s" % jobdir + }) + # immediately exit this process, leaving an orphaned process + # running which immediately forks a supervisory timing process -except Exception, err: - debug("error: %s"%(err)) - raise err + try: + pid = os.fork() + if pid: + # Notify the overlord that the async process started + + # we need to not return immmediately such that the launched command has an attempt + # to initialize PRIOR to ansible trying to clean up the launch directory (and argsfile) + # this probably could be done with some IPC later. 
Modules should always read + # the argsfile at the very first start of their execution anyway + notice("Return async_wrapper task started.") + print json.dumps({ "started" : 1, "ansible_job_id" : jid, "results_file" : job_path }) + sys.stdout.flush() + time.sleep(1) + sys.exit(0) + else: + # The actual wrapper process + + # Daemonize, so we keep on running + daemonize_self() + + # we are now daemonized, create a supervisory process + notice("Starting module and watcher") + + sub_pid = os.fork() + if sub_pid: + # the parent stops the process after the time limit + remaining = int(time_limit) + + # set the child process group id to kill all children + os.setpgid(sub_pid, sub_pid) + + notice("Start watching %s (%s)"%(sub_pid, remaining)) + time.sleep(step) + while os.waitpid(sub_pid, os.WNOHANG) == (0, 0): + notice("%s still running (%s)"%(sub_pid, remaining)) + time.sleep(step) + remaining = remaining - step + if remaining <= 0: + notice("Now killing %s"%(sub_pid)) + os.killpg(sub_pid, signal.SIGKILL) + notice("Sent kill to group %s"%sub_pid) + time.sleep(1) + sys.exit(0) + notice("Done in kid B.") + sys.exit(0) + else: + # the child process runs the actual module + notice("Start module (%s)"%os.getpid()) + _run_module(cmd, jid, job_path) + notice("Module complete (%s)"%os.getpid()) + sys.exit(0) + + except Exception, err: + notice("error: %s"%(err)) + print json.dumps({ + "failed" : True, + "msg" : "FATAL ERROR: %s" % str(err) + }) + sys.exit(1) From b90318ae6ce84789d819f0a4a76a45937d4c3e8c Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 29 Oct 2015 17:16:23 -0400 Subject: [PATCH 053/200] loop to get all load balancers, boto limited to 400 at a time fixes #2115 --- cloud/amazon/ec2_elb.py | 31 ++++++++++++++++++++----------- 1 file changed, 20 insertions(+), 11 deletions(-) diff --git a/cloud/amazon/ec2_elb.py b/cloud/amazon/ec2_elb.py index 9f333764a5d..7e383d1539d 100644 --- a/cloud/amazon/ec2_elb.py +++ b/cloud/amazon/ec2_elb.py @@ -50,10 +50,10 @@ options: 
choices: [ "yes", "no" ] wait: description: - - Wait for instance registration or deregistration to complete successfully before returning. + - Wait for instance registration or deregistration to complete successfully before returning. required: false default: yes - choices: [ "yes", "no" ] + choices: [ "yes", "no" ] validate_certs: description: - When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0. @@ -87,7 +87,7 @@ roles: - myrole post_tasks: - name: Instance Register - local_action: + local_action: module: ec2_elb instance_id: "{{ ansible_ec2_instance_id }}" ec2_elbs: "{{ item }}" @@ -256,12 +256,23 @@ class ElbManager: ec2_elbs = self._get_auto_scaling_group_lbs() try: - elb = connect_to_aws(boto.ec2.elb, self.region, - **self.aws_connect_params) + elb = connect_to_aws(boto.ec2.elb, self.region, **self.aws_connect_params) except (boto.exception.NoAuthHandlerFound, StandardError), e: self.module.fail_json(msg=str(e)) - elbs = elb.get_all_load_balancers() + elbs = [] + marker = None + while True: + try: + newelbs = elb.get_all_load_balancers(marker=marker) + if not newelbs.is_truncated: + break + elbs.extend(newelbs) + marker = newelbs.next_marker + except TypeError: + # Older version of boto do not allow for params + elbs = elb.get_all_load_balancers() + break if ec2_elbs: lbs = sorted(lb for lb in elbs if lb.name in ec2_elbs) @@ -302,8 +313,7 @@ class ElbManager: def _get_instance(self): """Returns a boto.ec2.InstanceObject for self.instance_id""" try: - ec2 = connect_to_aws(boto.ec2, self.region, - **self.aws_connect_params) + ec2 = connect_to_aws(boto.ec2, self.region, **self.aws_connect_params) except (boto.exception.NoAuthHandlerFound, StandardError), e: self.module.fail_json(msg=str(e)) return ec2.get_only_instances(instance_ids=[self.instance_id])[0] @@ -330,7 +340,7 @@ def main(): region, ec2_url, aws_connect_params = get_aws_connection_info(module) - if not region: + if not region: module.fail_json(msg="Region must be 
specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file") ec2_elbs = module.params['ec2_elbs'] @@ -342,8 +352,7 @@ def main(): module.fail_json(msg="ELBs are required for registration") instance_id = module.params['instance_id'] - elb_man = ElbManager(module, instance_id, ec2_elbs, - region=region, **aws_connect_params) + elb_man = ElbManager(module, instance_id, ec2_elbs, region=region, **aws_connect_params) if ec2_elbs is not None: for elb in ec2_elbs: From 794cbeea231aabfdafaf3d5dfcdb706cec037afe Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 2 Nov 2015 13:10:20 -0500 Subject: [PATCH 054/200] use marker instead of is_truncated which does not seem to work --- cloud/amazon/ec2_elb.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/amazon/ec2_elb.py b/cloud/amazon/ec2_elb.py index 7e383d1539d..5b3b24dacc2 100644 --- a/cloud/amazon/ec2_elb.py +++ b/cloud/amazon/ec2_elb.py @@ -265,10 +265,10 @@ class ElbManager: while True: try: newelbs = elb.get_all_load_balancers(marker=marker) - if not newelbs.is_truncated: + marker = newelbs.next_marker + if not marker: break elbs.extend(newelbs) - marker = newelbs.next_marker except TypeError: # Older version of boto do not allow for params elbs = elb.get_all_load_balancers() From bf5929d32a877bf4b9f59cf0072efa7cbdf9bf25 Mon Sep 17 00:00:00 2001 From: Michael Schuett Date: Tue, 30 Jun 2015 02:25:28 -0400 Subject: [PATCH 055/200] docker_image TLS Check commit enables using tls when using the docker_image module. It also removes the default for docker_url which doesn't allow us to check for DOCKER_HOST which is a more sane default. This allows you to use docker_image on OSX but more documentation is needed. 
--- cloud/docker/docker_image.py | 91 ++++++++++++++++++++++++++++++++++-- 1 file changed, 88 insertions(+), 3 deletions(-) diff --git a/cloud/docker/docker_image.py b/cloud/docker/docker_image.py index e6cfd87ab43..92aaa44a499 100644 --- a/cloud/docker/docker_image.py +++ b/cloud/docker/docker_image.py @@ -118,6 +118,7 @@ Remove image from local docker storage: ''' import re +import os from urlparse import urlparse try: @@ -161,11 +162,90 @@ class DockerImageManager: self.name = self.module.params.get('name') self.tag = self.module.params.get('tag') self.nocache = self.module.params.get('nocache') - docker_url = urlparse(module.params.get('docker_url')) + + # Connect to the docker server using any configured host and TLS settings. + + env_host = os.getenv('DOCKER_HOST') + env_docker_verify = os.getenv('DOCKER_TLS_VERIFY') + env_cert_path = os.getenv('DOCKER_CERT_PATH') + env_docker_hostname = os.getenv('DOCKER_TLS_HOSTNAME') + + docker_url = module.params.get('docker_url') + if not docker_url: + if env_host: + docker_url = env_host + else: + docker_url = 'unix://var/run/docker.sock' + + docker_api_version = module.params.get('docker_api_version') + + tls_client_cert = module.params.get('tls_client_cert', None) + if not tls_client_cert and env_cert_path: + tls_client_cert = os.path.join(env_cert_path, 'cert.pem') + + tls_client_key = module.params.get('tls_client_key', None) + if not tls_client_key and env_cert_path: + tls_client_key = os.path.join(env_cert_path, 'key.pem') + + tls_ca_cert = module.params.get('tls_ca_cert') + if not tls_ca_cert and env_cert_path: + tls_ca_cert = os.path.join(env_cert_path, 'ca.pem') + + tls_hostname = module.params.get('tls_hostname') + if tls_hostname is None: + if env_docker_hostname: + tls_hostname = env_docker_hostname + else: + parsed_url = urlparse(docker_url) + if ':' in parsed_url.netloc: + tls_hostname = parsed_url.netloc[:parsed_url.netloc.rindex(':')] + else: + tls_hostname = parsed_url + if not tls_hostname: + 
tls_hostname = True + + # use_tls can be one of four values: + # no: Do not use tls + # encrypt: Use tls. We may do client auth. We will not verify the server + # verify: Use tls. We may do client auth. We will verify the server + # None: Only use tls if the parameters for client auth were specified + # or tls_ca_cert (which requests verifying the server with + # a specific ca certificate) + use_tls = module.params.get('use_tls') + if use_tls is None and env_docker_verify is not None: + use_tls = 'verify' + + tls_config = None + if use_tls != 'no': + params = {} + + # Setup client auth + if tls_client_cert and tls_client_key: + params['client_cert'] = (tls_client_cert, tls_client_key) + + # We're allowed to verify the connection to the server + if use_tls == 'verify' or (use_tls is None and tls_ca_cert): + if tls_ca_cert: + params['ca_cert'] = tls_ca_cert + params['verify'] = True + params['assert_hostname'] = tls_hostname + else: + params['verify'] = True + params['assert_hostname'] = tls_hostname + elif use_tls == 'encrypt': + params['verify'] = False + + if params: + # See https://github.com/docker/docker-py/blob/d39da11/docker/utils/utils.py#L279-L296 + docker_url = docker_url.replace('tcp://', 'https://') + tls_config = docker.tls.TLSConfig(**params) + self.client = docker.Client( base_url=docker_url.geturl(), version=module.params.get('docker_api_version'), - timeout=module.params.get('timeout')) + timeout=module.params.get('timeout'), + tls=tls_config) + self.changed = False self.log = [] self.error_msg = None @@ -244,7 +324,12 @@ def main(): tag = dict(required=False, default="latest"), nocache = dict(default=False, type='bool'), state = dict(default='present', choices=['absent', 'present', 'build']), - docker_url = dict(default='unix://var/run/docker.sock'), + use_tls = dict(default=None, choices=['no', 'encrypt', 'verify']), + tls_client_cert = dict(required=False, default=None, type='str'), + tls_client_key = dict(required=False, default=None, 
type='str'), + tls_ca_cert = dict(required=False, default=None, type='str'), + tls_hostname = dict(required=False, type='str', default=None), + docker_url = dict(), docker_api_version = dict(required=False, default=DEFAULT_DOCKER_API_VERSION, type='str'), From addbc329beb74b7d0561960d914294824dac9eeb Mon Sep 17 00:00:00 2001 From: Michael Schuett Date: Wed, 1 Jul 2015 19:41:17 -0400 Subject: [PATCH 056/200] Improve Error Reporting This will hopefully help mac users be able to quickly resolve any issues they may find when trying to use this module. --- cloud/docker/docker_image.py | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/cloud/docker/docker_image.py b/cloud/docker/docker_image.py index 92aaa44a499..20776ee139c 100644 --- a/cloud/docker/docker_image.py +++ b/cloud/docker/docker_image.py @@ -371,6 +371,29 @@ def main(): module.exit_json(failed=failed, changed=manager.has_changed(), msg=msg, image_id=image_id) + except SSLError as e: + if get_platform() == "Darwin" and "DOCKER_HOST" in os.environ: + # Ensure that the environment variables has been set + if "DOCKER_HOST" not in os.environ: + environment_error = ''' + It looks like you have not set your docker environment + variables. Please ensure that you have set the requested + variables as instructed when running boot2docker up. If + they are set in .bash_profile you will need to symlink + it to .bashrc. + ''' + module.exit_json(failed=True, chaged=manager.has_changed(), msg="SSLError: " + str(e) + environment_error) + # If the above is true it's likely the hostname does not match + else: + environment_error = ''' + You may need to ignore hostname missmatches by passing + -e 'host_key_checking=False' through the command line. 
+ ''' + module.exit_json(failed=True, chaged=manager.has_changed(), msg="SSLError: " + str(e) + environment_error) + # General error for non darwin users + else: + module.exit_json(failed=True, chaged=manager.has_changed(), msg="SSLError: " + str(e)) + except DockerAPIError as e: module.exit_json(failed=True, changed=manager.has_changed(), msg="Docker API error: " + e.explanation) From 0a5b7087bdac11f8eab76b94098d1f1928341851 Mon Sep 17 00:00:00 2001 From: Michael Schuett Date: Wed, 1 Jul 2015 19:43:26 -0400 Subject: [PATCH 057/200] Improve Message Give user a course of action in the case where the suggestions do not work. This will hopefully allow us to work through any further issues much faster. --- cloud/docker/docker_image.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cloud/docker/docker_image.py b/cloud/docker/docker_image.py index 20776ee139c..bdc31e71c99 100644 --- a/cloud/docker/docker_image.py +++ b/cloud/docker/docker_image.py @@ -388,6 +388,8 @@ def main(): environment_error = ''' You may need to ignore hostname missmatches by passing -e 'host_key_checking=False' through the command line. + If this does not resolve the issue please open an issue + at ansible/ansible-modules-core and ping michaeljs1990 ''' module.exit_json(failed=True, chaged=manager.has_changed(), msg="SSLError: " + str(e) + environment_error) # General error for non darwin users From 1e8d20b0dae125f504e1cccdaef63c4aecd87f16 Mon Sep 17 00:00:00 2001 From: Michael Schuett Date: Wed, 1 Jul 2015 20:15:23 -0400 Subject: [PATCH 058/200] Documentation Fix Updated documentation to match current module state. 
--- cloud/docker/docker_image.py | 37 +++++++++++++++++++++++++++++++++++- 1 file changed, 36 insertions(+), 1 deletion(-) diff --git a/cloud/docker/docker_image.py b/cloud/docker/docker_image.py index bdc31e71c99..4498998e8fe 100644 --- a/cloud/docker/docker_image.py +++ b/cloud/docker/docker_image.py @@ -63,8 +63,43 @@ options: description: - URL of docker host to issue commands to required: false - default: unix://var/run/docker.sock + default: ${DOCKER_HOST} or unix://var/run/docker.sock aliases: [] + use_tls: + description: + - Whether to use tls to connect to the docker server. "no" means not to + use tls (and ignore any other tls related parameters). "encrypt" means + to use tls to encrypt the connection to the server. "verify" means to + also verify that the server's certificate is valid for the server + (this both verifies the certificate against the CA and that the + certificate was issued for that host. If this is unspecified, tls will + only be used if one of the other tls options require it. + choices: [ "no", "encrypt", "verify" ] + version_added: "1.9" + tls_client_cert: + description: + - Path to the PEM-encoded certificate used to authenticate docker client. + If specified tls_client_key must be valid + default: ${DOCKER_CERT_PATH}/cert.pem + version_added: "1.9" + tls_client_key: + description: + - Path to the PEM-encoded key used to authenticate docker client. If + specified tls_client_cert must be valid + default: ${DOCKER_CERT_PATH}/key.pem + version_added: "1.9" + tls_ca_cert: + description: + - Path to a PEM-encoded certificate authority to secure the Docker connection. + This has no effect if use_tls is encrypt. + default: ${DOCKER_CERT_PATH}/ca.pem + version_added: "1.9" + tls_hostname: + description: + - A hostname to check matches what's supplied in the docker server's + certificate. If unspecified, the hostname is taken from the docker_url. 
+ default: Taken from docker_url + version_added: "1.9" docker_api_version: description: - Remote API version to use. This defaults to the current default as From c1264988996fad7d788c07a806a132a9b9ad1761 Mon Sep 17 00:00:00 2001 From: Michael Schuett Date: Thu, 2 Jul 2015 11:45:29 -0400 Subject: [PATCH 059/200] Remove faulty logic Update logic after splitting the error into two separate messages. --- cloud/docker/docker_image.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/docker/docker_image.py b/cloud/docker/docker_image.py index 4498998e8fe..1800dfa28d9 100644 --- a/cloud/docker/docker_image.py +++ b/cloud/docker/docker_image.py @@ -407,7 +407,7 @@ def main(): module.exit_json(failed=failed, changed=manager.has_changed(), msg=msg, image_id=image_id) except SSLError as e: - if get_platform() == "Darwin" and "DOCKER_HOST" in os.environ: + if get_platform() == "Darwin": # Ensure that the environment variables has been set if "DOCKER_HOST" not in os.environ: environment_error = ''' From 1dcb31cad6ae0805ee463228f83973a004e3c7ab Mon Sep 17 00:00:00 2001 From: Michael Schuett Date: Tue, 7 Jul 2015 09:44:27 -0400 Subject: [PATCH 060/200] remove .geturl() Can't call geturl on a string. --- cloud/docker/docker_image.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/docker/docker_image.py b/cloud/docker/docker_image.py index 1800dfa28d9..6f41755c929 100644 --- a/cloud/docker/docker_image.py +++ b/cloud/docker/docker_image.py @@ -276,7 +276,7 @@ class DockerImageManager: tls_config = docker.tls.TLSConfig(**params) self.client = docker.Client( - base_url=docker_url.geturl(), + base_url=docker_url, version=module.params.get('docker_api_version'), timeout=module.params.get('timeout'), tls=tls_config) From b0357bf9e8f27cad04bd3882acab215e0463a0a5 Mon Sep 17 00:00:00 2001 From: Michael Schuett Date: Tue, 7 Jul 2015 15:36:10 -0400 Subject: [PATCH 061/200] Handle connection error Try and help when mac hits a connection error. 
--- cloud/docker/docker_image.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/cloud/docker/docker_image.py b/cloud/docker/docker_image.py index 6f41755c929..3807c963de1 100644 --- a/cloud/docker/docker_image.py +++ b/cloud/docker/docker_image.py @@ -431,6 +431,20 @@ def main(): else: module.exit_json(failed=True, chaged=manager.has_changed(), msg="SSLError: " + str(e)) + except ConnectionError as e: + if get_platform() == "Darwin" and "DOCKER_HOST" not in os.environ: + # Ensure that the environment variables has been set + environment_error = ''' + It looks like you have not set your docker environment + variables. Please ensure that you have set the requested + variables as instructed when running boot2docker up. If + they are set in .bash_profile you will need to symlink + it to .bashrc. + ''' + module.exit_json(failed=True, chaged=manager.has_changed(), msg="ConnectionError: " + str(e) + environment_error) + + module.exit_json(failed=True, chaged=manager.has_changed(), msg="ConnectionError: " + str(e)) + except DockerAPIError as e: module.exit_json(failed=True, changed=manager.has_changed(), msg="Docker API error: " + e.explanation) From 80c2e28a48c487caf3f133af81d1d94f410809ea Mon Sep 17 00:00:00 2001 From: Michael Schuett Date: Tue, 7 Jul 2015 15:37:36 -0400 Subject: [PATCH 062/200] Fix message Previous fix did not actual work. This fix does however. --- cloud/docker/docker_image.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/cloud/docker/docker_image.py b/cloud/docker/docker_image.py index 3807c963de1..327349dc683 100644 --- a/cloud/docker/docker_image.py +++ b/cloud/docker/docker_image.py @@ -421,10 +421,10 @@ def main(): # If the above is true it's likely the hostname does not match else: environment_error = ''' - You may need to ignore hostname missmatches by passing - -e 'host_key_checking=False' through the command line. 
- If this does not resolve the issue please open an issue - at ansible/ansible-modules-core and ping michaeljs1990 + You may need to ignore hostname missmatches by setting + tls_hostname=boot2docker in your role. If this does not + resolve the issue please open an issue at + ansible/ansible-modules-core and ping michaeljs1990 ''' module.exit_json(failed=True, chaged=manager.has_changed(), msg="SSLError: " + str(e) + environment_error) # General error for non darwin users From e318be30ff0a4c19e61ff15437ff96a6ba57f696 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 2 Nov 2015 10:43:33 -0800 Subject: [PATCH 063/200] Fix issues version_added and chaged => changed typo --- cloud/docker/docker_image.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/cloud/docker/docker_image.py b/cloud/docker/docker_image.py index 327349dc683..a2d0143e284 100644 --- a/cloud/docker/docker_image.py +++ b/cloud/docker/docker_image.py @@ -75,31 +75,31 @@ options: certificate was issued for that host. If this is unspecified, tls will only be used if one of the other tls options require it. choices: [ "no", "encrypt", "verify" ] - version_added: "1.9" + version_added: "2.0" tls_client_cert: description: - Path to the PEM-encoded certificate used to authenticate docker client. If specified tls_client_key must be valid default: ${DOCKER_CERT_PATH}/cert.pem - version_added: "1.9" + version_added: "2.0" tls_client_key: description: - Path to the PEM-encoded key used to authenticate docker client. If specified tls_client_cert must be valid default: ${DOCKER_CERT_PATH}/key.pem - version_added: "1.9" + version_added: "2.0" tls_ca_cert: description: - Path to a PEM-encoded certificate authority to secure the Docker connection. This has no effect if use_tls is encrypt. 
default: ${DOCKER_CERT_PATH}/ca.pem - version_added: "1.9" + version_added: "2.0" tls_hostname: description: - A hostname to check matches what's supplied in the docker server's certificate. If unspecified, the hostname is taken from the docker_url. default: Taken from docker_url - version_added: "1.9" + version_added: "2.0" docker_api_version: description: - Remote API version to use. This defaults to the current default as @@ -417,7 +417,7 @@ def main(): they are set in .bash_profile you will need to symlink it to .bashrc. ''' - module.exit_json(failed=True, chaged=manager.has_changed(), msg="SSLError: " + str(e) + environment_error) + module.exit_json(failed=True, changed=manager.has_changed(), msg="SSLError: " + str(e) + environment_error) # If the above is true it's likely the hostname does not match else: environment_error = ''' @@ -426,10 +426,10 @@ def main(): resolve the issue please open an issue at ansible/ansible-modules-core and ping michaeljs1990 ''' - module.exit_json(failed=True, chaged=manager.has_changed(), msg="SSLError: " + str(e) + environment_error) + module.exit_json(failed=True, changed=manager.has_changed(), msg="SSLError: " + str(e) + environment_error) # General error for non darwin users else: - module.exit_json(failed=True, chaged=manager.has_changed(), msg="SSLError: " + str(e)) + module.exit_json(failed=True, changed=manager.has_changed(), msg="SSLError: " + str(e)) except ConnectionError as e: if get_platform() == "Darwin" and "DOCKER_HOST" not in os.environ: @@ -441,9 +441,9 @@ def main(): they are set in .bash_profile you will need to symlink it to .bashrc. 
''' - module.exit_json(failed=True, chaged=manager.has_changed(), msg="ConnectionError: " + str(e) + environment_error) + module.exit_json(failed=True, changed=manager.has_changed(), msg="ConnectionError: " + str(e) + environment_error) - module.exit_json(failed=True, chaged=manager.has_changed(), msg="ConnectionError: " + str(e)) + module.exit_json(failed=True, changed=manager.has_changed(), msg="ConnectionError: " + str(e)) except DockerAPIError as e: module.exit_json(failed=True, changed=manager.has_changed(), msg="Docker API error: " + e.explanation) From baafcfc091d09f69ef51a9b1cd36ee2fc5169f83 Mon Sep 17 00:00:00 2001 From: Harlan Lieberman-Berg Date: Sat, 15 Aug 2015 11:40:00 +0200 Subject: [PATCH 064/200] Change behavior of apt.py around installing recommended packages. Closes #1189. This will cause the settings in Ansible to override the system settings. That will have no effect except on systems that have an out-of-Ansible configuration that disables automatic installation of recommended packages. Previously, ansible would use the OS default whenever install_recommends wasn't part of the playbook. This change will cause the Ansible default configuration setting of installing recommended packages to override the configuration files set on the OS for things installed through ansible, even when there is no install_recommends specified in the playbook. Because the OS default matches the Ansible default, this shouldn't have wide impact. 
--- packaging/os/apt.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/packaging/os/apt.py b/packaging/os/apt.py index d99eb85ff7e..7198d934ca2 100755 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -231,7 +231,7 @@ def package_status(m, pkgname, version, cache, state): provided_packages = cache.get_providing_packages(pkgname) if provided_packages: is_installed = False - # when virtual package providing only one package, look up status of target package + # when virtual package providing only one package, look up status of target package if cache.is_virtual_package(pkgname) and len(provided_packages) == 1: package = provided_packages[0] installed, upgradable, has_files = package_status(m, package.name, version, cache, state='install') @@ -386,7 +386,9 @@ def install(m, pkgspec, cache, upgrade=False, default_release=None, if default_release: cmd += " -t '%s'" % (default_release,) if not install_recommends: - cmd += " --no-install-recommends" + cmd += " -o APT::Install-Recommends=no" + else: + cmd += " -o APT::Install-Recommends=yes" rc, out, err = m.run_command(cmd) if rc: From a234e9b7b2a7b13400023dd3b703ca41f8163715 Mon Sep 17 00:00:00 2001 From: Harlan Lieberman-Berg Date: Sat, 15 Aug 2015 18:41:42 +0200 Subject: [PATCH 065/200] Change install_recommended in apt to a trinary. Conditions are now "yes", "no", and "default", with the latter falling back to the OS default. --- packaging/os/apt.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/packaging/os/apt.py b/packaging/os/apt.py index 7198d934ca2..fe8bbfff00a 100755 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -62,7 +62,7 @@ options: default: null install_recommends: description: - - Corresponds to the C(--no-install-recommends) option for I(apt). Default behavior (C(yes)) replicates apt's default behavior; C(no) does not install recommended packages. Suggested packages are never installed. 
+ - Corresponds to the C(--no-install-recommends) option for I(apt). C(yes) installs recommended packages. C(no) does not install recommended packages. By default, Ansible will use the same defaults as the operating system. Suggested packages are never installed. required: false default: yes choices: [ "yes", "no" ] @@ -339,7 +339,7 @@ def expand_pkgspec_from_fnmatches(m, pkgspec, cache): return new_pkgspec def install(m, pkgspec, cache, upgrade=False, default_release=None, - install_recommends=True, force=False, + install_recommends, force=False, dpkg_options=expand_dpkg_options(DPKG_OPTIONS), build_dep=False): pkg_list = [] @@ -385,9 +385,9 @@ def install(m, pkgspec, cache, upgrade=False, default_release=None, if default_release: cmd += " -t '%s'" % (default_release,) - if not install_recommends: + if install_recommends == 'no': cmd += " -o APT::Install-Recommends=no" - else: + elif install_recommends == 'yes': cmd += " -o APT::Install-Recommends=yes" rc, out, err = m.run_command(cmd) @@ -549,7 +549,7 @@ def main(): package = dict(default=None, aliases=['pkg', 'name'], type='list'), deb = dict(default=None), default_release = dict(default=None, aliases=['default-release']), - install_recommends = dict(default='yes', aliases=['install-recommends'], type='bool'), + install_recommends = dict(default='default', aliases=['install-recommends'], choices=['default', 'yes', 'no'), force = dict(default='no', type='bool'), upgrade = dict(choices=['no', 'yes', 'safe', 'full', 'dist']), dpkg_options = dict(default=DPKG_OPTIONS) From 06a4efa1cf40daf27cf180a0d07646f4d921d15e Mon Sep 17 00:00:00 2001 From: Harlan Lieberman-Berg Date: Sat, 15 Aug 2015 18:45:08 +0200 Subject: [PATCH 066/200] Add missing brace. 
--- packaging/os/apt.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/os/apt.py b/packaging/os/apt.py index fe8bbfff00a..16c6a5f83b6 100755 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -549,7 +549,7 @@ def main(): package = dict(default=None, aliases=['pkg', 'name'], type='list'), deb = dict(default=None), default_release = dict(default=None, aliases=['default-release']), - install_recommends = dict(default='default', aliases=['install-recommends'], choices=['default', 'yes', 'no'), + install_recommends = dict(default='default', aliases=['install-recommends'], choices=['default', 'yes', 'no']), force = dict(default='no', type='bool'), upgrade = dict(choices=['no', 'yes', 'safe', 'full', 'dist']), dpkg_options = dict(default=DPKG_OPTIONS) From a53cf5434bfbf4ae975bf9dd27f9d5bd2dd19c60 Mon Sep 17 00:00:00 2001 From: Harlan Lieberman-Berg Date: Sat, 15 Aug 2015 20:00:25 +0200 Subject: [PATCH 067/200] Give include_recommends a useless default to make the parser happy. 
--- packaging/os/apt.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/os/apt.py b/packaging/os/apt.py index 16c6a5f83b6..cbf0375e473 100755 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -339,7 +339,7 @@ def expand_pkgspec_from_fnmatches(m, pkgspec, cache): return new_pkgspec def install(m, pkgspec, cache, upgrade=False, default_release=None, - install_recommends, force=False, + install_recommends='default', force=False, dpkg_options=expand_dpkg_options(DPKG_OPTIONS), build_dep=False): pkg_list = [] From 5cacef8617cdf9568134360451cfe8b7b619bbd2 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 2 Nov 2015 13:03:18 -0800 Subject: [PATCH 068/200] Fixes for bcoca's review of #1916 --- packaging/os/apt.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/packaging/os/apt.py b/packaging/os/apt.py index cbf0375e473..b5c363ab1f5 100755 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -64,7 +64,7 @@ options: description: - Corresponds to the C(--no-install-recommends) option for I(apt). C(yes) installs recommended packages. C(no) does not install recommended packages. By default, Ansible will use the same defaults as the operating system. Suggested packages are never installed. 
required: false - default: yes + default: null choices: [ "yes", "no" ] force: description: @@ -339,7 +339,7 @@ def expand_pkgspec_from_fnmatches(m, pkgspec, cache): return new_pkgspec def install(m, pkgspec, cache, upgrade=False, default_release=None, - install_recommends='default', force=False, + install_recommends=None, force=False, dpkg_options=expand_dpkg_options(DPKG_OPTIONS), build_dep=False): pkg_list = [] @@ -385,10 +385,12 @@ def install(m, pkgspec, cache, upgrade=False, default_release=None, if default_release: cmd += " -t '%s'" % (default_release,) - if install_recommends == 'no': + + if install_recommends is False: cmd += " -o APT::Install-Recommends=no" - elif install_recommends == 'yes': + elif install_recommends is True: cmd += " -o APT::Install-Recommends=yes" + # install_recommends is None uses the OS default rc, out, err = m.run_command(cmd) if rc: @@ -549,7 +551,7 @@ def main(): package = dict(default=None, aliases=['pkg', 'name'], type='list'), deb = dict(default=None), default_release = dict(default=None, aliases=['default-release']), - install_recommends = dict(default='default', aliases=['install-recommends'], choices=['default', 'yes', 'no']), + install_recommends = dict(default=None, aliases=['install-recommends'], type='bool'), force = dict(default='no', type='bool'), upgrade = dict(choices=['no', 'yes', 'safe', 'full', 'dist']), dpkg_options = dict(default=DPKG_OPTIONS) From a38e0095f9c4c141adebba54bebbd55bd39d959f Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 2 Nov 2015 18:54:30 -0500 Subject: [PATCH 069/200] added missing version added --- cloud/amazon/elasticache.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cloud/amazon/elasticache.py b/cloud/amazon/elasticache.py index d3f7ff4cdc7..ba8ed455d12 100644 --- a/cloud/amazon/elasticache.py +++ b/cloud/amazon/elasticache.py @@ -48,6 +48,7 @@ options: - The name of the cache parameter group to associate with this cache cluster. 
If this argument is omitted, the default cache parameter group for the specified engine will be used. required: false default: none + version_added: "2.0" node_type: description: - The compute and memory capacity of the nodes in the cache cluster From e16c5c54fd87a46bbc7019297a25e0bd98dafe5f Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 2 Nov 2015 17:27:20 -0800 Subject: [PATCH 070/200] Revert "Expose cache_parameter_group_name in elasticache module" This PR #1950 implements duplicate functionality to #1353 This reverts commit b04efa22c4403ca869e94e7918721306d23afa8d. Conflicts: cloud/amazon/elasticache.py --- cloud/amazon/elasticache.py | 26 ++++++-------------------- 1 file changed, 6 insertions(+), 20 deletions(-) diff --git a/cloud/amazon/elasticache.py b/cloud/amazon/elasticache.py index ba8ed455d12..d275ba2be82 100644 --- a/cloud/amazon/elasticache.py +++ b/cloud/amazon/elasticache.py @@ -43,12 +43,6 @@ options: - The version number of the cache engine required: false default: none - cache_parameter_group_name: - description: - - The name of the cache parameter group to associate with this cache cluster. If this argument is omitted, the default cache parameter group for the specified engine will be used. - required: false - default: none - version_added: "2.0" node_type: description: - The compute and memory capacity of the nodes in the cache cluster @@ -63,9 +57,9 @@ options: - The port number on which each of the cache nodes will accept connections required: false default: none - parameter_group: + cache_parameter_group: description: - - Specify non-default parameter group names to be associated with cache cluster + - The name of the cache parameter group to associate with this cache cluster. If this argument is omitted, the default cache parameter group for the specified engine will be used. 
required: false default: None version_added: "2.0" @@ -158,12 +152,11 @@ class ElastiCacheManager(object): def __init__(self, module, name, engine, cache_engine_version, node_type, num_nodes, cache_port, parameter_group, cache_subnet_group, cache_security_groups, security_group_ids, zone, wait, - hard_modify, region, cache_parameter_group_name=None, **aws_connect_kwargs): + hard_modify, region, **aws_connect_kwargs): self.module = module self.name = name self.engine = engine self.cache_engine_version = cache_engine_version - self.cache_parameter_group_name = cache_parameter_group_name self.node_type = node_type self.num_nodes = num_nodes self.cache_port = cache_port @@ -224,7 +217,6 @@ class ElastiCacheManager(object): cache_node_type=self.node_type, engine=self.engine, engine_version=self.cache_engine_version, - cache_parameter_group_name=self.cache_parameter_group_name, cache_security_group_names=self.cache_security_groups, security_group_ids=self.security_group_ids, cache_parameter_group_name=self.parameter_group, @@ -306,8 +298,7 @@ class ElastiCacheManager(object): cache_parameter_group_name=self.parameter_group, security_group_ids=self.security_group_ids, apply_immediately=True, - engine_version=self.cache_engine_version, - cache_parameter_group_name=self.cache_parameter_group_name) + engine_version=self.cache_engine_version) except boto.exception.BotoServerError, e: self.module.fail_json(msg=e.message) @@ -493,7 +484,6 @@ def main(): name={'required': True}, engine={'required': False, 'default': 'memcached'}, cache_engine_version={'required': False}, - cache_parameter_group_name={'required': False}, node_type={'required': False, 'default': 'cache.m1.small'}, num_nodes={'required': False, 'default': None, 'type': 'int'}, parameter_group={'required': False, 'default': None}, @@ -522,7 +512,6 @@ def main(): state = module.params['state'] engine = module.params['engine'] cache_engine_version = module.params['cache_engine_version'] - cache_parameter_group_name = 
module.params['cache_parameter_group_name'] node_type = module.params['node_type'] num_nodes = module.params['num_nodes'] cache_port = module.params['cache_port'] @@ -549,16 +538,13 @@ def main(): module.fail_json(msg=str("Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set.")) elasticache_manager = ElastiCacheManager(module, name, engine, - cache_engine_version, - node_type, + cache_engine_version, node_type, num_nodes, cache_port, parameter_group, cache_subnet_group, cache_security_groups, security_group_ids, zone, wait, - hard_modify, region, - cache_parameter_group_name=cache_parameter_group_name, - **aws_connect_kwargs) + hard_modify, region, **aws_connect_kwargs) if state == 'present': elasticache_manager.ensure_present() From 0de2627efc63e0b1f6d24f0bd96d4e5f276ad275 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 2 Nov 2015 17:33:04 -0800 Subject: [PATCH 071/200] Make cache_parameter_group the name of this new param to match with similar params (leave old name as an alias) --- cloud/amazon/elasticache.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/cloud/amazon/elasticache.py b/cloud/amazon/elasticache.py index d275ba2be82..a22bea70d72 100644 --- a/cloud/amazon/elasticache.py +++ b/cloud/amazon/elasticache.py @@ -63,6 +63,7 @@ options: required: false default: None version_added: "2.0" + aliases: [ 'parameter_group' ] cache_subnet_group: description: - The subnet group name to associate with. Only use if inside a vpc. 
Required if inside a vpc @@ -150,7 +151,7 @@ class ElastiCacheManager(object): EXIST_STATUSES = ['available', 'creating', 'rebooting', 'modifying'] def __init__(self, module, name, engine, cache_engine_version, node_type, - num_nodes, cache_port, parameter_group, cache_subnet_group, + num_nodes, cache_port, cache_parameter_group, cache_subnet_group, cache_security_groups, security_group_ids, zone, wait, hard_modify, region, **aws_connect_kwargs): self.module = module @@ -160,7 +161,7 @@ class ElastiCacheManager(object): self.node_type = node_type self.num_nodes = num_nodes self.cache_port = cache_port - self.parameter_group = parameter_group + self.cache_parameter_group = cache_parameter_group self.cache_subnet_group = cache_subnet_group self.cache_security_groups = cache_security_groups self.security_group_ids = security_group_ids @@ -219,7 +220,7 @@ class ElastiCacheManager(object): engine_version=self.cache_engine_version, cache_security_group_names=self.cache_security_groups, security_group_ids=self.security_group_ids, - cache_parameter_group_name=self.parameter_group, + cache_parameter_group_name=self.cache_parameter_group, cache_subnet_group_name=self.cache_subnet_group, preferred_availability_zone=self.zone, port=self.cache_port) @@ -295,7 +296,7 @@ class ElastiCacheManager(object): num_cache_nodes=self.num_nodes, cache_node_ids_to_remove=nodes_to_remove, cache_security_group_names=self.cache_security_groups, - cache_parameter_group_name=self.parameter_group, + cache_parameter_group_name=self.cache_parameter_group, security_group_ids=self.security_group_ids, apply_immediately=True, engine_version=self.cache_engine_version) @@ -486,7 +487,8 @@ def main(): cache_engine_version={'required': False}, node_type={'required': False, 'default': 'cache.m1.small'}, num_nodes={'required': False, 'default': None, 'type': 'int'}, - parameter_group={'required': False, 'default': None}, + # alias for compat with the original PR 1950 + cache_parameter_group={'required': 
False, 'default': None, 'aliases': ['parameter_group']}, cache_port={'required': False, 'type': 'int'}, cache_subnet_group={'required': False, 'default': None}, cache_security_groups={'required': False, 'default': [default], @@ -521,7 +523,7 @@ def main(): zone = module.params['zone'] wait = module.params['wait'] hard_modify = module.params['hard_modify'] - parameter_group = module.params['parameter_group'] + cache_parameter_group = module.params['cache_parameter_group'] if cache_subnet_group and cache_security_groups == [default]: cache_security_groups = [] @@ -540,7 +542,7 @@ def main(): elasticache_manager = ElastiCacheManager(module, name, engine, cache_engine_version, node_type, num_nodes, cache_port, - parameter_group, + cache_parameter_group, cache_subnet_group, cache_security_groups, security_group_ids, zone, wait, From fa2ea225dddaaf82dfa3800746179112fec67c4f Mon Sep 17 00:00:00 2001 From: Lars Larsson Date: Tue, 3 Nov 2015 11:54:31 +0100 Subject: [PATCH 072/200] total_seconds not present on timedelta on python2.6 --- utilities/logic/wait_for.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/utilities/logic/wait_for.py b/utilities/logic/wait_for.py index 1287d9b6057..e30dec11fc6 100644 --- a/utilities/logic/wait_for.py +++ b/utilities/logic/wait_for.py @@ -103,7 +103,7 @@ options: notes: - The ability to use search_regex with a port connection was added in 1.7. 
requirements: [] -author: +author: - "Jeroen Hoekx (@jhoekx)" - "John Jarvis (@jarv)" - "Andrii Radyk (@AnderEnder)" @@ -127,7 +127,7 @@ EXAMPLES = ''' - wait_for: path=/tmp/foo search_regex=completed # wait until the lock file is removed -- wait_for: path=/var/lock/file.lock state=absent +- wait_for: path=/var/lock/file.lock state=absent # wait until the process is finished and pid was destroyed - wait_for: path=/proc/3466/status state=absent @@ -322,6 +322,11 @@ def _create_connection( (host, port), connect_timeout): connect_socket = socket.create_connection( (host, port), connect_timeout) return connect_socket +def _timedelta_total_seconds(timedelta): + return ( + timedelta.microseconds + 0.0 + + (timedelta.seconds + timedelta.days * 24 * 3600) * 10 ** 6) / 10 ** 6 + def main(): module = AnsibleModule( @@ -432,7 +437,7 @@ def main(): except IOError: pass elif port: - alt_connect_timeout = math.ceil((end - datetime.datetime.now()).total_seconds()) + alt_connect_timeout = math.ceil(_timedelta_total_seconds(end - datetime.datetime.now())) try: s = _create_connection((host, port), min(connect_timeout, alt_connect_timeout)) except: @@ -444,7 +449,7 @@ def main(): data = '' matched = False while datetime.datetime.now() < end: - max_timeout = math.ceil((end - datetime.datetime.now()).total_seconds()) + max_timeout = math.ceil(_timedelta_total_seconds(end - datetime.datetime.now())) (readable, w, e) = select.select([s], [], [], max_timeout) if not readable: # No new data. 
Probably means our timeout From 2a97e9f2997136a13e7df72f8246cd33438087c0 Mon Sep 17 00:00:00 2001 From: Felix Engelmann Date: Sun, 30 Aug 2015 16:24:13 +0200 Subject: [PATCH 073/200] re-implements #226 in optional (editable) way with backward compatibility --- packaging/language/pip.py | 29 ++++++++++++++++++++--------- 1 file changed, 20 insertions(+), 9 deletions(-) mode change 100644 => 100755 packaging/language/pip.py diff --git a/packaging/language/pip.py b/packaging/language/pip.py old mode 100644 new mode 100755 index a4af27ccee5..bdd2b40a1aa --- a/packaging/language/pip.py +++ b/packaging/language/pip.py @@ -90,6 +90,12 @@ options: required: false default: null version_added: "1.0" + editable: + description: + - Pass the editable flag for versioning URLs. + required: false + default: yes + version_added: "2.0" chdir: description: - cd into this directory before running the command @@ -121,6 +127,9 @@ EXAMPLES = ''' # Install (MyApp) using one of the remote protocols (bzr+,hg+,git+,svn+). You do not have to supply '-e' option in extra_args. - pip: name='svn+http://myrepo/svn/MyApp#egg=MyApp' +# Install MyApp using one of the remote protocols (bzr+,hg+,git+) in a non editable way. +- pip: name='git+http://myrepo/app/MyApp' editable=false + # Install (MyApp) from local tarball - pip: name='file:///path/to/MyApp.tar.gz' @@ -239,6 +248,7 @@ def main(): virtualenv_python=dict(default=None, required=False, type='str'), use_mirrors=dict(default='yes', type='bool'), extra_args=dict(default=None, required=False), + editable=dict(default='yes', type='bool', required=False), chdir=dict(default=None, required=False, type='path'), executable=dict(default=None, required=False), ), @@ -312,15 +322,16 @@ def main(): # Automatically apply -e option to extra_args when source is a VCS url. 
VCS # includes those beginning with svn+, git+, hg+ or bzr+ if name: - if name.startswith('svn+') or name.startswith('git+') or \ - name.startswith('hg+') or name.startswith('bzr+'): - args_list = [] # used if extra_args is not used at all - if extra_args: - args_list = extra_args.split(' ') - if '-e' not in args_list: - args_list.append('-e') - # Ok, we will reconstruct the option string - extra_args = ' '.join(args_list) + if module.params['editable']: + if name.startswith('svn+') or name.startswith('git+') or \ + name.startswith('hg+') or name.startswith('bzr+'): + args_list = [] # used if extra_args is not used at all + if extra_args: + args_list = extra_args.split(' ') + if '-e' not in args_list: + args_list.append('-e') + # Ok, we will reconstruct the option string + extra_args = ' '.join(args_list) if extra_args: cmd += ' %s' % extra_args From a95fee40793666b534898bacb2fb145b361d86d9 Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Thu, 6 Aug 2015 13:34:25 +0100 Subject: [PATCH 074/200] Use 'pip freeze' output to detect changes with requirement specified If the requirements contains a repos url it will always report 'Successfully installed'; there is no difference in the output to tell apart if anything new was pulled. Use freeze to detect if the environment changed in any way. 
Should fix ansible/ansible#1705 --- packaging/language/pip.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/packaging/language/pip.py b/packaging/language/pip.py index bdd2b40a1aa..3b5f396ab45 100755 --- a/packaging/language/pip.py +++ b/packaging/language/pip.py @@ -363,6 +363,12 @@ def main(): changed = (state == 'present' and not is_present) or (state == 'absent' and is_present) module.exit_json(changed=changed, cmd=freeze_cmd, stdout=out, stderr=err) + if requirements: + freeze_cmd = '%s freeze' % pip + out_freeze_before = module.run_command(freeze_cmd, cwd=chdir)[1] + else: + out_freeze_before = None + rc, out_pip, err_pip = module.run_command(cmd, path_prefix=path_prefix, cwd=chdir) out += out_pip err += err_pip @@ -375,7 +381,11 @@ def main(): if state == 'absent': changed = 'Successfully uninstalled' in out_pip else: - changed = 'Successfully installed' in out_pip + if out_freeze_before is None: + changed = 'Successfully installed' in out_pip + else: + out_freeze_after = module.run_command(freeze_cmd, cwd=chdir)[1] + changed = out_freeze_before != out_freeze_after module.exit_json(changed=changed, cmd=cmd, name=name, version=version, state=state, requirements=requirements, virtualenv=env, From c860af29b28a1c66a13d8e9d4d8d7a518ae9a75c Mon Sep 17 00:00:00 2001 From: Daniele Varrazzo Date: Thu, 6 Aug 2015 14:24:41 +0100 Subject: [PATCH 075/200] Detect unchanged pip runs when using a vcs url in name Should fix bug #1645 --- packaging/language/pip.py | 26 ++++++++++++-------------- 1 file changed, 12 insertions(+), 14 deletions(-) diff --git a/packaging/language/pip.py b/packaging/language/pip.py index 3b5f396ab45..6d325282770 100755 --- a/packaging/language/pip.py +++ b/packaging/language/pip.py @@ -20,6 +20,7 @@ # import tempfile +import re import os DOCUMENTATION = ''' @@ -321,17 +322,15 @@ def main(): # Automatically apply -e option to extra_args when source is a VCS url. 
VCS # includes those beginning with svn+, git+, hg+ or bzr+ - if name: - if module.params['editable']: - if name.startswith('svn+') or name.startswith('git+') or \ - name.startswith('hg+') or name.startswith('bzr+'): - args_list = [] # used if extra_args is not used at all - if extra_args: - args_list = extra_args.split(' ') - if '-e' not in args_list: - args_list.append('-e') - # Ok, we will reconstruct the option string - extra_args = ' '.join(args_list) + has_vcs = bool(name and re.match(r'(svn|git|hg|bzr)\+', name)) + if has_vcs and module.params['editable']: + args_list = [] # used if extra_args is not used at all + if extra_args: + args_list = extra_args.split(' ') + if '-e' not in args_list: + args_list.append('-e') + # Ok, we will reconstruct the option string + extra_args = ' '.join(args_list) if extra_args: cmd += ' %s' % extra_args @@ -344,8 +343,7 @@ def main(): if module.check_mode: if extra_args or requirements or state == 'latest' or not name: module.exit_json(changed=True) - elif name.startswith('svn+') or name.startswith('git+') or \ - name.startswith('hg+') or name.startswith('bzr+'): + elif has_vcs: module.exit_json(changed=True) freeze_cmd = '%s freeze' % pip @@ -363,7 +361,7 @@ def main(): changed = (state == 'present' and not is_present) or (state == 'absent' and is_present) module.exit_json(changed=changed, cmd=freeze_cmd, stdout=out, stderr=err) - if requirements: + if requirements or has_vcs: freeze_cmd = '%s freeze' % pip out_freeze_before = module.run_command(freeze_cmd, cwd=chdir)[1] else: From cac69f3135d6e4934dcf4a143f7e1717f1a82506 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 3 Nov 2015 13:25:46 -0500 Subject: [PATCH 076/200] added missing version_added --- cloud/amazon/ec2_ami.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/cloud/amazon/ec2_ami.py b/cloud/amazon/ec2_ami.py index d7a60211bd3..bdb130e2380 100644 --- a/cloud/amazon/ec2_ami.py +++ b/cloud/amazon/ec2_ami.py @@ -86,8 +86,7 @@ options: - Users 
and groups that should be able to launch the ami. Expects dictionary with a key of user_ids and/or group_names. user_ids should be a list of account ids. group_name should be a list of groups, "all" is the only acceptable value currently. required: false default: null - aliases: [] - + version_added: "2.0" author: "Evan Duffield (@scicoin-project) " extends_documentation_fragment: - aws From 5b904c1401dee1f6c02780aeb696c01bea398762 Mon Sep 17 00:00:00 2001 From: Leonty Date: Wed, 4 Nov 2015 12:43:03 +0300 Subject: [PATCH 077/200] Corrected misspelling in the 'labels' docker parameter documentation. --- cloud/docker/docker.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index c22013bb933..c94df54ad89 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -323,10 +323,9 @@ options: labels: description: - Set container labels. Requires docker >= 1.6 and docker-py >= 1.2.0. - requered: false + required: false default: null version_added: "1.9.4" - author: - "Cove Schneider (@cove)" - "Joshua Conner (@joshuaconner)" From adbc430984679ca9469347add900895076c8fedd Mon Sep 17 00:00:00 2001 From: Stewart Rutledge Date: Wed, 4 Nov 2015 14:22:08 +0100 Subject: [PATCH 078/200] Added support for reconfiguring network (moving to another switch, for example) --- cloud/vmware/vsphere_guest.py | 101 ++++++++++++++++++++++++++++++++++ 1 file changed, 101 insertions(+) diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index b8adb7930c3..a14f807e049 100644 --- a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -743,6 +743,9 @@ def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool, cluster_name # set the new RAM size spec.set_element_memoryMB(int(vm_hardware['memory_mb'])) changes['memory'] = vm_hardware['memory_mb'] + # ===( Reconfigure Network )====# + if vm_nic: + changed = reconfigure_net(vsphere_client, vm, module, esxi, resource_pool, guest, 
vm_nic, cluster_name) # ====( Config Memory )====# if 'num_cpus' in vm_hardware: @@ -814,6 +817,104 @@ def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool, cluster_name module.exit_json(changed=False) +def reconfigure_net(vsphere_client, vm, module, esxi, resource_pool, guest, vm_nic, cluster_name=None): + s = vsphere_client + nics = {} + request = VI.ReconfigVM_TaskRequestMsg() + _this = request.new__this(vm._mor) + _this.set_attribute_type(vm._mor.get_attribute_type()) + request.set_element__this(_this) + nic_changes = [] + datacenter = esxi['datacenter'] + # Datacenter managed object reference + dclist = [k for k, + v in vsphere_client.get_datacenters().items() if v == datacenter] + if dclist: + dcmor=dclist[0] + else: + vsphere_client.disconnect() + module.fail_json(msg="Cannot find datacenter named: %s" % datacenter) + dcprops = VIProperty(vsphere_client, dcmor) + nfmor = dcprops.networkFolder._obj + for k,v in vm_nic.iteritems(): + nicNum = k[len(k) -1] + if vm_nic[k]['network_type'] == 'dvs': + portgroupKey = find_portgroup_key(module, s, nfmor, vm_nic[k]['network']) + todvs = True + elif vm_nic[k]['network_type'] == 'standard': + todvs = False + # Detect cards that need to be changed and network type (and act accordingly) + for dev in vm.properties.config.hardware.device: + if dev._type in ["VirtualE1000", "VirtualE1000e", + "VirtualPCNet32", "VirtualVmxnet", + "VirtualNmxnet2", "VirtualVmxnet3"]: + devNum = dev.deviceInfo.label[len(dev.deviceInfo.label) - 1] + if devNum == nicNum: + fromdvs = dev.deviceInfo.summary.split(':')[0] == 'DVSwitch' + if todvs and fromdvs: + if dev.backing.port._obj.get_element_portgroupKey() != portgroupKey: + nics[k] = (dev, portgroupKey, 1) + elif fromdvs and not todvs: + nics[k] = (dev, '', 2) + elif not fromdvs and todvs: + nics[k] = (dev, portgroupKey, 3) + elif not fromdvs and not todvs: + if dev.backing._obj.get_element_deviceName() != vm_nic[k]['network']: + nics[k] = (dev, '', 2) + else: + pass + else: + 
module.exit_json() + + if len(nics) > 0: + for nic, obj in nics.iteritems(): + """ + 1,2 and 3 are used to mark which action should be taken + 1 = from a distributed switch to a distributed switch + 2 = to a standard switch + 3 = to a distributed switch + """ + dev = obj[0] + pgKey = obj[1] + dvsKey = obj[2] + if dvsKey == 1: + dev.backing.port._obj.set_element_portgroupKey(pgKey) + dev.backing.port._obj.set_element_portKey('') + if dvsKey == 3: + dvswitch_uuid = find_dvswitch_uuid(module, s, nfmor, pgKey) + nic_backing_port = VI.ns0.DistributedVirtualSwitchPortConnection_Def( + "nic_backing_port").pyclass() + nic_backing_port.set_element_switchUuid(dvswitch_uuid) + nic_backing_port.set_element_portgroupKey(pgKey) + nic_backing_port.set_element_portKey('') + nic_backing = VI.ns0.VirtualEthernetCardDistributedVirtualPortBackingInfo_Def( + "nic_backing").pyclass() + nic_backing.set_element_port(nic_backing_port) + dev._obj.set_element_backing(nic_backing) + if dvsKey == 2: + nic_backing = VI.ns0.VirtualEthernetCardNetworkBackingInfo_Def( + "nic_backing").pyclass() + nic_backing.set_element_deviceName(vm_nic[nic]['network']) + dev._obj.set_element_backing(nic_backing) + for nic, obj in nics.iteritems(): + dev = obj[0] + spec = request.new_spec() + nic_change = spec.new_deviceChange() + nic_change.set_element_device(dev._obj) + nic_change.set_element_operation("edit") + nic_changes.append(nic_change) + spec.set_element_deviceChange(nic_changes) + request.set_element_spec(spec) + ret = vsphere_client._proxy.ReconfigVM_Task(request)._returnval + task = VITask(ret, vsphere_client) + status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR]) + if status == task.STATE_SUCCESS: + return(True) + elif status == task.STATE_ERROR: + module.fail_json(msg="Could not change network %s" % task.get_error_message()) + elif len(nics) == 0: + return(False) + def create_vm(vsphere_client, module, esxi, resource_pool, cluster_name, guest, vm_extra_config, vm_hardware, vm_disk, 
vm_nic, vm_hw_version, state): datacenter = esxi['datacenter'] From 76cd8381f8f6497a410370b6e7293b7787f7b48d Mon Sep 17 00:00:00 2001 From: Rabenstein Date: Wed, 4 Nov 2015 14:54:46 +0100 Subject: [PATCH 079/200] Absent unction was not working on user with login profile also fixed the exception handling --- cloud/amazon/iam.py | 27 +++++++++++++++++++-------- 1 file changed, 19 insertions(+), 8 deletions(-) diff --git a/cloud/amazon/iam.py b/cloud/amazon/iam.py index 8864cb10a6f..8f068a942c4 100644 --- a/cloud/amazon/iam.py +++ b/cloud/amazon/iam.py @@ -192,14 +192,18 @@ def create_user(module, iam, name, pwd, path, key_state, key_count): def delete_user(module, iam, name): + del_meta = '' try: current_keys = [ck['access_key_id'] for ck in iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata] for key in current_keys: iam.delete_access_key(key, name) + login_profile = iam.get_login_profiles(name) + if login_profile: + iam.delete_login_profile(name) del_meta = iam.delete_user(name).delete_user_response - except boto.exception.BotoServerError, err: - error_msg = boto_exception(err) + except Exception as ex: + module.fail_json(changed=False, msg="delete failed %s" %ex) if ('must detach all policies first') in error_msg: for policy in iam.get_all_user_policies(name).list_user_policies_result.policy_names: iam.delete_user_policy(name, policy) @@ -213,7 +217,7 @@ def delete_user(module, iam, name): "currently supported by boto. Please detach the polices " "through the console and try again." % name) else: - module.fail_json(changed=changed, msg=str(err)) + module.fail_json(changed=changed, msg=str(del_meta)) else: changed = True return del_meta, name, changed @@ -647,15 +651,20 @@ def main(): else: module.exit_json( changed=changed, groups=user_groups, user_name=name, keys=key_list) + elif state == 'update' and not user_exists: module.fail_json( msg="The user %s does not exit. No update made." 
% name) + elif state == 'absent': - if name in orig_user_list: - set_users_groups(module, iam, name, '') - del_meta, name, changed = delete_user(module, iam, name) - module.exit_json( - deletion_meta=del_meta, deleted_user=name, changed=changed) + if user_exists: + try: + set_users_groups(module, iam, name, '') + del_meta, name, changed = delete_user(module, iam, name) + module.exit_json(deleted_user=name, changed=changed, orig_user_list=orig_user_list) + + except Exception as ex: + module.fail_json(changed=changed, msg=str(ex)) else: module.exit_json( changed=False, msg="User %s is already absent from your AWS IAM users" % name) @@ -687,9 +696,11 @@ def main(): if not new_path and not new_name: module.exit_json( changed=changed, group_name=name, group_path=cur_path) + elif state == 'update' and not group_exists: module.fail_json( changed=changed, msg="Update Failed. Group %s doesn't seem to exit!" % name) + elif state == 'absent': if name in orig_group_list: removed_group, changed = delete_group(iam=iam, name=name) From 3d8f0b5d95691f76a3849f67b2d3f5bda4a908aa Mon Sep 17 00:00:00 2001 From: Rabenstein Date: Wed, 4 Nov 2015 15:34:48 +0100 Subject: [PATCH 080/200] fixed the delete user function now works with or without loginprofile (password) --- cloud/amazon/iam.py | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/cloud/amazon/iam.py b/cloud/amazon/iam.py index 8f068a942c4..86c9723afe5 100644 --- a/cloud/amazon/iam.py +++ b/cloud/amazon/iam.py @@ -198,10 +198,16 @@ def delete_user(module, iam, name): iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata] for key in current_keys: iam.delete_access_key(key, name) - login_profile = iam.get_login_profiles(name) - if login_profile: - iam.delete_login_profile(name) - del_meta = iam.delete_user(name).delete_user_response + try: + login_profile = iam.get_login_profiles(name).get_login_profile_response + except boto.exception.BotoServerError, err: + error_msg = 
boto_exception(err) + if ('Cannot find Login Profile') in error_msg: + + del_meta = iam.delete_user(name).delete_user_response + else: + iam.delete_login_profile(name) + del_meta = iam.delete_user(name).delete_user_response except Exception as ex: module.fail_json(changed=False, msg="delete failed %s" %ex) if ('must detach all policies first') in error_msg: @@ -661,7 +667,7 @@ def main(): try: set_users_groups(module, iam, name, '') del_meta, name, changed = delete_user(module, iam, name) - module.exit_json(deleted_user=name, changed=changed, orig_user_list=orig_user_list) + module.exit_json(deleted_user=name, changed=changed) except Exception as ex: module.fail_json(changed=changed, msg=str(ex)) From b655b6ae2683c12f660dcb4f9483496ef814a3c6 Mon Sep 17 00:00:00 2001 From: Rabenstein Date: Wed, 4 Nov 2015 15:39:34 +0100 Subject: [PATCH 081/200] typo --- cloud/amazon/iam.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/iam.py b/cloud/amazon/iam.py index 86c9723afe5..5aef25a2602 100644 --- a/cloud/amazon/iam.py +++ b/cloud/amazon/iam.py @@ -223,7 +223,7 @@ def delete_user(module, iam, name): "currently supported by boto. Please detach the polices " "through the console and try again." 
% name) else: - module.fail_json(changed=changed, msg=str(del_meta)) + module.fail_json(changed=changed, msg=str(error_msg)) else: changed = True return del_meta, name, changed From 2aeb188d81a22d030398ff4018b5cf676ca0e5f4 Mon Sep 17 00:00:00 2001 From: Lee Hardy Date: Wed, 4 Nov 2015 16:37:18 +0000 Subject: [PATCH 082/200] - fix user_exists statement with host_all to use only username parameter --- database/mysql/mysql_user.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/database/mysql/mysql_user.py b/database/mysql/mysql_user.py index acf093f8490..d63fd41f44f 100644 --- a/database/mysql/mysql_user.py +++ b/database/mysql/mysql_user.py @@ -226,7 +226,7 @@ def connect(module, login_user=None, login_password=None, config_file=''): def user_exists(cursor, user, host, host_all): if host_all: - cursor.execute("SELECT count(*) FROM user WHERE user = %s AND host = %s", (user,host)) + cursor.execute("SELECT count(*) FROM user WHERE user = %s", user) else: cursor.execute("SELECT count(*) FROM user WHERE user = %s AND host = %s", (user,host)) From 3f5d6df5f717d80e8a59fe66ca6be79e491ec80a Mon Sep 17 00:00:00 2001 From: nitzmahone Date: Wed, 4 Nov 2015 17:29:08 -0800 Subject: [PATCH 083/200] fixed ansible_totalmem fact returning 0 Win32_PhysicalMemory CIM object is busted on some virtual environments, switched to Win32_ComputerSystem.TotalPhysicalMemory --- windows/setup.ps1 | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/windows/setup.ps1 b/windows/setup.ps1 index 0b3e2c897e3..4d163c7ec26 100644 --- a/windows/setup.ps1 +++ b/windows/setup.ps1 @@ -26,11 +26,9 @@ $result = New-Object psobject @{ }; $win32_os = Get-CimInstance Win32_OperatingSystem +$win32_cs = Get-CimInstance Win32_ComputerSystem $osversion = [Environment]::OSVersion -$memory = @() -$memory += Get-WmiObject win32_Physicalmemory -$capacity = 0 -$memory | foreach {$capacity += $_.Capacity} +$capacity = $win32_cs.TotalPhysicalMemory # Win32_PhysicalMemory is empty on 
some virtual platforms $netcfg = Get-WmiObject win32_NetworkAdapterConfiguration $ActiveNetcfg = @(); $ActiveNetcfg+= $netcfg | where {$_.ipaddress -ne $null} From 77d3678acfa3a19517aeb45214118aa8b74637a9 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 6 Nov 2015 09:31:20 -0800 Subject: [PATCH 084/200] Fix escaping of newline carriage return characters in the documentation --- windows/win_template.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/windows/win_template.py b/windows/win_template.py index e8323362dd6..4ffcaafe2c7 100644 --- a/windows/win_template.py +++ b/windows/win_template.py @@ -15,7 +15,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: win_template version_added: "1.9.2" @@ -47,8 +47,8 @@ notes: - "templates are loaded with C(trim_blocks=True)." - By default, windows line endings are not created in the generated file. - "In order to ensure windows line endings are in the generated file, add the following header - as the first line of your template: #jinja2: newline_sequence:'\r\n' and ensure each line - of the template ends with \r\n" + as the first line of your template: #jinja2: newline_sequence:'\\\\r\\\\n' and ensure each line + of the template ends with \\\\r\\\\n" - Beware fetching files from windows machines when creating templates because certain tools, such as Powershell ISE, and regedit's export facility add a Byte Order Mark as the first character of the file, which can cause tracebacks. 
From dd26c37f6b4454ceaffe1b298f3626979c65cf2f Mon Sep 17 00:00:00 2001 From: Tom Paine Date: Fri, 6 Nov 2015 17:43:24 +0000 Subject: [PATCH 085/200] Update ec2_elb_lb.py add connection draining default --- cloud/amazon/ec2_elb_lb.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cloud/amazon/ec2_elb_lb.py b/cloud/amazon/ec2_elb_lb.py index 954f06496ae..8488b78a110 100644 --- a/cloud/amazon/ec2_elb_lb.py +++ b/cloud/amazon/ec2_elb_lb.py @@ -107,6 +107,7 @@ options: description: - Wait a specified timeout allowing connections to drain before terminating an instance required: false + default: "no" aliases: [] version_added: "1.8" idle_timeout: From 4ed7b690f685f87934fdb304b302f96de9f09139 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 6 Nov 2015 10:53:33 -0800 Subject: [PATCH 086/200] Add a new contributor as a maintainer of the docker module --- cloud/docker/docker.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 2b2e3ea9b4b..befe3bd0510 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -331,6 +331,7 @@ author: - "Joshua Conner (@joshuaconner)" - "Pavel Antonov (@softzilla)" - "Ash Wilson (@smashwilson)" + - "Thomas Steinbach (@ThomasSteinbach)" requirements: - "python >= 2.6" - "docker-py >= 0.3.0" From f2943bd4045a32e2c4967fbc87f4385b86cd1d79 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 6 Nov 2015 21:18:46 -0800 Subject: [PATCH 087/200] Add zfil as an owner of the docker module --- cloud/docker/docker.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index befe3bd0510..0ecfb93b0c2 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -332,6 +332,7 @@ author: - "Pavel Antonov (@softzilla)" - "Ash Wilson (@smashwilson)" - "Thomas Steinbach (@ThomasSteinbach)" + - "Philippe Jandot (@zfil)" requirements: - "python >= 2.6" - "docker-py >= 0.3.0" From e74dc8c1ddc0fb6de51797c2c23881a109d6930a Mon Sep 17 
00:00:00 2001 From: Brian Coca Date: Sat, 7 Nov 2015 08:23:35 -0500 Subject: [PATCH 088/200] minor doc fixes --- files/ini_file.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/files/ini_file.py b/files/ini_file.py index d837c329d4b..ce286741981 100644 --- a/files/ini_file.py +++ b/files/ini_file.py @@ -29,8 +29,7 @@ description: - Manage (add, remove, change) individual settings in an INI-style file without having to manage the file as a whole with, say, M(template) or M(assemble). Adds missing sections if they don't exist. - - Comments are discarded when the source file is read, and therefore will not - show up in the destination file. + - Before version 2.0, comments are discarded when the source file is read, and therefore will not show up in the destination file. version_added: "0.9" options: dest: @@ -80,7 +79,9 @@ notes: Either use M(template) to create a base INI file with a C([default]) section, or use M(lineinfile) to add the missing line. requirements: [ ConfigParser ] -author: "Jan-Piet Mens (@jpmens), Ales Nosek" +author: + - "Jan-Piet Mens (@jpmens)" + - "Ales Nosek (@noseka1)" ''' EXAMPLES = ''' From ca12ed5d98eec5c4ec210c11c34e7efa31405693 Mon Sep 17 00:00:00 2001 From: Timothy Appnel Date: Sat, 7 Nov 2015 18:16:20 -0500 Subject: [PATCH 089/200] Added the checksum_algo alias to the stats module. 
--- files/stat.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/files/stat.py b/files/stat.py index 61c77a2ef31..852ddd5afd2 100644 --- a/files/stat.py +++ b/files/stat.py @@ -53,6 +53,7 @@ options: required: false choices: [ 'sha1', 'sha224', 'sha256', 'sha384', 'sha512' ] default: sha1 + aliases: [ 'checksum_algo' ] version_added: "2.0" author: "Bruce Pennypacker (@bpennypacker)" ''' @@ -292,7 +293,7 @@ def main(): follow = dict(default='no', type='bool'), get_md5 = dict(default='yes', type='bool'), get_checksum = dict(default='yes', type='bool'), - checksum_algorithm = dict(default='sha1', type='str', choices=['sha1', 'sha224', 'sha256', 'sha384', 'sha512']) + checksum_algorithm = dict(default='sha1', type='str', choices=['sha1', 'sha224', 'sha256', 'sha384', 'sha512'], aliases=['checksum_algo']) ), supports_check_mode = True ) @@ -381,4 +382,4 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() \ No newline at end of file +main() From 9618986804428c29670f2409903d84e9e3df6950 Mon Sep 17 00:00:00 2001 From: Andy Nelson Date: Tue, 11 Aug 2015 19:51:59 +0100 Subject: [PATCH 090/200] Update to ec2_vpc.py to: 1 allow interface ids and vpc peering connections as route targets 2 set state to "terminated" when VPC is removed 3 fix some comment typos updates per PR comments --- cloud/amazon/ec2_vpc.py | 29 +++++++++++++++++++---------- 1 file changed, 19 insertions(+), 10 deletions(-) diff --git a/cloud/amazon/ec2_vpc.py b/cloud/amazon/ec2_vpc.py index a3003a6dcc6..741e73de479 100644 --- a/cloud/amazon/ec2_vpc.py +++ b/cloud/amazon/ec2_vpc.py @@ -72,7 +72,7 @@ options: aliases: [] route_tables: description: - - 'A dictionary array of route tables to add of the form: { subnets: [172.22.2.0/24, 172.22.3.0/24,], routes: [{ dest: 0.0.0.0/0, gw: igw},], resource_tags: ... }. Where the subnets list is those subnets the route table should be associated with, and the routes list is a list of routes to be in the table. 
The special keyword for the gw of igw specifies that you should the route should go through the internet gateway attached to the VPC. gw also accepts instance-ids in addition igw. resource_tags is optional and uses dictionary form: { "Name": "public", ... }. This module is currently unable to affect the "main" route table due to some limitations in boto, so you must explicitly define the associated subnets or they will be attached to the main table implicitly. As of 1.8, if the route_tables parameter is not specified, no existing routes will be modified.' + - 'A dictionary array of route tables to add of the form: { subnets: [172.22.2.0/24, 172.22.3.0/24,], routes: [{ dest: 0.0.0.0/0, gw: igw},], resource_tags: ... }. Where the subnets list is those subnets the route table should be associated with, and the routes list is a list of routes to be in the table. The special keyword for the gw of igw specifies that you should the route should go through the internet gateway attached to the VPC. gw also accepts instance-ids, interface-ids, and vpc-peering-connection-ids in addition igw. resource_tags is optional and uses dictionary form: { "Name": "public", ... }. This module is currently unable to affect the "main" route table due to some limitations in boto, so you must explicitly define the associated subnets or they will be attached to the main table implicitly. As of 1.8, if the route_tables parameter is not specified, no existing routes will be modified.' required: false default: null aliases: [] @@ -234,25 +234,29 @@ def routes_match(rt_list=None, rt=None, igw=None): Returns: True when there provided routes and remote routes are the same. - False when provided routes and remote routes are diffrent. + False when provided routes and remote routes are different. 
""" local_routes = [] remote_routes = [] for route in rt_list: - route_kwargs = {} + route_kwargs = { + 'gateway_id': None, + 'instance_id': None, + 'interface_id': None, + 'vpc_peering_connection_id': None, + 'state': 'active' + } if route['gw'] == 'igw': route_kwargs['gateway_id'] = igw.id - route_kwargs['instance_id'] = None - route_kwargs['state'] = 'active' elif route['gw'].startswith('i-'): route_kwargs['instance_id'] = route['gw'] - route_kwargs['gateway_id'] = None - route_kwargs['state'] = 'active' + elif route['gw'].startswith('eni-'): + route_kwargs['interface_id'] = route['gw'] + elif route['gw'].startswith('pcx-'): + route_kwargs['vpc_peering_connection_id'] = route['gw'] else: route_kwargs['gateway_id'] = route['gw'] - route_kwargs['instance_id'] = None - route_kwargs['state'] = 'active' route_kwargs['destination_cidr_block'] = route['dest'] local_routes.append(route_kwargs) for j in rt.routes: @@ -280,7 +284,7 @@ def rtb_changed(route_tables=None, vpc_conn=None, module=None, vpc=None, igw=Non igw : The internet gateway object for this vpc Returns: - True when there is diffrence beween the provided routes and remote routes and if subnet assosications are diffrent. + True when there is difference between the provided routes and remote routes and if subnet associations are different. False when both routes and subnet associations matched. 
""" @@ -509,6 +513,10 @@ def create_vpc(module, vpc_conn): route_kwargs['gateway_id'] = igw.id elif route['gw'].startswith('i-'): route_kwargs['instance_id'] = route['gw'] + elif route['gw'].startswith('eni-'): + route_kwargs['interface_id'] = route['gw'] + elif route['gw'].startswith('pcx-'): + route_kwargs['vpc_peering_connection_id'] = route['gw'] else: route_kwargs['gateway_id'] = route['gw'] vpc_conn.create_route(new_rt.id, route['dest'], **route_kwargs) @@ -652,6 +660,7 @@ def terminate_vpc(module, vpc_conn, vpc_id=None, cidr=None): msg='Unable to delete VPC {0}, error: {1}'.format(vpc.id, e) ) changed = True + vpc_dict['state'] = "terminated" return (changed, vpc_dict, terminated_vpc_id) From ccb39767cf7841682d4f89b87030f2d4761b5751 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Mon, 9 Nov 2015 10:39:56 -0500 Subject: [PATCH 091/200] Use add_ips_to_server API This module is still using an old pre-release API and needs to update to use the current API. Co-Authored-By: Marton Kiss --- cloud/openstack/os_floating_ip.py | 25 +++++++------------------ 1 file changed, 7 insertions(+), 18 deletions(-) diff --git a/cloud/openstack/os_floating_ip.py b/cloud/openstack/os_floating_ip.py index 10827012ae8..957e3057375 100644 --- a/cloud/openstack/os_floating_ip.py +++ b/cloud/openstack/os_floating_ip.py @@ -122,10 +122,10 @@ def main(): argument_spec = openstack_full_argument_spec( server=dict(required=True), state=dict(default='present', choices=['absent', 'present']), - network=dict(required=False), - floating_ip_address=dict(required=False), + network=dict(required=False, default=None), + floating_ip_address=dict(required=False, default=None), reuse=dict(required=False, type='bool', default=False), - fixed_address=dict(required=False), + fixed_address=dict(required=False, default=None), wait=dict(required=False, type='bool', default=False), timeout=dict(required=False, type='int', default=60), ) @@ -154,23 +154,12 @@ def main(): msg="server {0} not 
found".format(server_name_or_id)) if state == 'present': - if floating_ip_address is None: - if reuse: - f_ip = cloud.available_floating_ip(network=network) - else: - f_ip = cloud.create_floating_ip(network=network) - else: - f_ip = _get_floating_ip(cloud, floating_ip_address) - if f_ip is None: - module.fail_json( - msg="floating IP {0} not found".format( - floating_ip_address)) - - cloud.attach_ip_to_server( - server_id=server['id'], floating_ip_id=f_ip['id'], + cloud.add_ips_to_server( + server=server, ips=floating_ip_address, reuse=reuse, fixed_address=fixed_address, wait=wait, timeout=timeout) + fip_address = cloud.get_server_public_ip(server) # Update the floating IP status - f_ip = cloud.get_floating_ip(id=f_ip['id']) + f_ip = _get_floating_ip(cloud, fip_address) module.exit_json(changed=True, floating_ip=f_ip) elif state == 'absent': From 7d665db5e5ed20f036b28885c2b8f03c9285c631 Mon Sep 17 00:00:00 2001 From: Rabenstein Date: Wed, 4 Nov 2015 14:54:46 +0100 Subject: [PATCH 092/200] Squash of 3 commits for bugfix. 
Absent unction was not working on user with login profile also fixed the exception handling fixed the delete user function now works with or without loginprofile (password) typo --- cloud/amazon/iam.py | 35 ++++++++++++++++++++++++++--------- 1 file changed, 26 insertions(+), 9 deletions(-) diff --git a/cloud/amazon/iam.py b/cloud/amazon/iam.py index 8864cb10a6f..5aef25a2602 100644 --- a/cloud/amazon/iam.py +++ b/cloud/amazon/iam.py @@ -192,14 +192,24 @@ def create_user(module, iam, name, pwd, path, key_state, key_count): def delete_user(module, iam, name): + del_meta = '' try: current_keys = [ck['access_key_id'] for ck in iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata] for key in current_keys: iam.delete_access_key(key, name) - del_meta = iam.delete_user(name).delete_user_response - except boto.exception.BotoServerError, err: - error_msg = boto_exception(err) + try: + login_profile = iam.get_login_profiles(name).get_login_profile_response + except boto.exception.BotoServerError, err: + error_msg = boto_exception(err) + if ('Cannot find Login Profile') in error_msg: + + del_meta = iam.delete_user(name).delete_user_response + else: + iam.delete_login_profile(name) + del_meta = iam.delete_user(name).delete_user_response + except Exception as ex: + module.fail_json(changed=False, msg="delete failed %s" %ex) if ('must detach all policies first') in error_msg: for policy in iam.get_all_user_policies(name).list_user_policies_result.policy_names: iam.delete_user_policy(name, policy) @@ -213,7 +223,7 @@ def delete_user(module, iam, name): "currently supported by boto. Please detach the polices " "through the console and try again." 
% name) else: - module.fail_json(changed=changed, msg=str(err)) + module.fail_json(changed=changed, msg=str(error_msg)) else: changed = True return del_meta, name, changed @@ -647,15 +657,20 @@ def main(): else: module.exit_json( changed=changed, groups=user_groups, user_name=name, keys=key_list) + elif state == 'update' and not user_exists: module.fail_json( msg="The user %s does not exit. No update made." % name) + elif state == 'absent': - if name in orig_user_list: - set_users_groups(module, iam, name, '') - del_meta, name, changed = delete_user(module, iam, name) - module.exit_json( - deletion_meta=del_meta, deleted_user=name, changed=changed) + if user_exists: + try: + set_users_groups(module, iam, name, '') + del_meta, name, changed = delete_user(module, iam, name) + module.exit_json(deleted_user=name, changed=changed) + + except Exception as ex: + module.fail_json(changed=changed, msg=str(ex)) else: module.exit_json( changed=False, msg="User %s is already absent from your AWS IAM users" % name) @@ -687,9 +702,11 @@ def main(): if not new_path and not new_name: module.exit_json( changed=changed, group_name=name, group_path=cur_path) + elif state == 'update' and not group_exists: module.fail_json( changed=changed, msg="Update Failed. Group %s doesn't seem to exit!" 
% name) + elif state == 'absent': if name in orig_group_list: removed_group, changed = delete_group(iam=iam, name=name) From 2c95641d66d5ca0eac3bb95d7361a71eb89758d1 Mon Sep 17 00:00:00 2001 From: Steve Spencer Date: Wed, 11 Nov 2015 16:44:01 +0200 Subject: [PATCH 093/200] Add support for mounting host volumes with Z and z options --- cloud/docker/docker.py | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 0ecfb93b0c2..12e7851f910 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -80,7 +80,7 @@ options: volumes: description: - List of volumes to mount within the container using docker CLI-style - - 'syntax: C(/host:/container[:mode]) where "mode" may be "rw" or "ro".' + - 'syntax: C(/host:/container[:mode]) where "mode" may be "rw", "ro", "Z", "z".' default: null volumes_from: description: @@ -626,14 +626,14 @@ class DockerManager(object): # host mount (e.g. /mnt:/tmp, bind mounts host's /tmp to /mnt in the container) elif 2 <= len(parts) <= 3: # default to read-write - ro = False + mode = 'rw' # with supplied bind mode if len(parts) == 3: - if parts[2] not in ['ro', 'rw']: - self.module.fail_json(msg='bind mode needs to either be "ro" or "rw"') + if parts[2] not in ['ro', 'rw', 'z', 'Z']: + self.module.fail_json(msg='bind mode needs to be one of "ro", "rw", "z", or "Z"') else: - ro = parts[2] == 'ro' - self.binds[parts[0]] = {'bind': parts[1], 'ro': ro } + mode = parts[2] + self.binds[parts[0]] = {'bind': parts[1], 'mode': mode } else: self.module.fail_json(msg='volumes support 1 to 3 arguments') @@ -1197,10 +1197,7 @@ class DockerManager(object): for host_path, config in self.binds.iteritems(): if isinstance(config, dict): container_path = config['bind'] - if config['ro']: - mode = 'ro' - else: - mode = 'rw' + mode = config['mode'] else: container_path = config mode = 'rw' From 6584b59d91d096406453b8cfc816730bbba0267e Mon Sep 17 00:00:00 2001 From: Jordi De 
Groof Date: Wed, 11 Nov 2015 20:23:24 +0100 Subject: [PATCH 094/200] Update facts when hostname is changed ansible_hostname contains the unqualified hostname --- system/hostname.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/system/hostname.py b/system/hostname.py index 2914088691a..0d4ca085b83 100644 --- a/system/hostname.py +++ b/system/hostname.py @@ -42,6 +42,7 @@ EXAMPLES = ''' - hostname: name=web01 ''' +import socket from distutils.version import LooseVersion # import module snippets @@ -563,6 +564,10 @@ def main(): hostname.set_permanent_hostname(name) changed = True - module.exit_json(changed=changed, name=name, ansible_facts=dict(ansible_hostname=name)) + module.exit_json(changed=changed, name=name, + ansible_facts=dict(ansible_hostname=name.split('.')[0], + ansible_nodename=name, + ansible_fqdn=socket.getfqdn(), + ansible_domain='.'.join(socket.getfqdn().split('.')[1:]))) main() From 889274a5256fb2f60667d2964e233141eeaa14fd Mon Sep 17 00:00:00 2001 From: J Levitt Date: Wed, 11 Nov 2015 15:15:30 -0600 Subject: [PATCH 095/200] Add rds restore example to list of examples There was no db restore example. I've provided one that shows how to do the restore, then add a security group (you cannot add the security group during the restore step -- it has to be done in a modify step afterward). Also, I show how to get the endpoint. --- cloud/amazon/rds.py | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/cloud/amazon/rds.py b/cloud/amazon/rds.py index 1eb4cc8ab1d..d8f5a2cea86 100644 --- a/cloud/amazon/rds.py +++ b/cloud/amazon/rds.py @@ -271,6 +271,33 @@ EXAMPLES = ''' command: reboot instance_name: database wait: yes + +# Restore a Postgres db instance from a snapshot, wait for it to become available again, and +# then modify it to add your security group. Also, display the new endpoint. 
+# Note that the "publicly_accessible" option is allowed here just as it is in the AWS CLI +- local_action: + module: rds + command: restore + snapshot: mypostgres-snapshot + instance_name: MyNewInstanceName + region: us-west-2 + zone: us-west-2b + subnet: default-vpc-xx441xxx + publicly_accessible: yes + wait: yes + wait_timeout: 600 + tags: + Name: pg1_test_name_tag + register: rds + +- local_action: + module: rds + command: modify + instance_name: MyNewInstanceName + region: us-west-2 + vpc_security_groups: sg-xxx945xx + +- debug: msg="The new db endpoint is {{ rds.instance.endpoint }}" ''' From fc5da26deeda577e8034b249caea6b7399b556d3 Mon Sep 17 00:00:00 2001 From: Steve Spencer Date: Thu, 12 Nov 2015 10:42:26 +0200 Subject: [PATCH 096/200] Sync up with allowable docker volume mounting modes --- cloud/docker/docker.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 12e7851f910..c6cf10f0783 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -79,8 +79,10 @@ options: version_added: "1.5" volumes: description: - - List of volumes to mount within the container using docker CLI-style - - 'syntax: C(/host:/container[:mode]) where "mode" may be "rw", "ro", "Z", "z".' + - List of volumes to mount within the container + - 'Use docker CLI-style syntax: C(/host:/container[:mode])' + - You can specify a read mode for the mount with either C(ro) or C(rw). SELinux hosts can additionally + use C(z) or C(Z) mount options to use a shared or private label for the volume. 
default: null volumes_from: description: @@ -629,8 +631,8 @@ class DockerManager(object): mode = 'rw' # with supplied bind mode if len(parts) == 3: - if parts[2] not in ['ro', 'rw', 'z', 'Z']: - self.module.fail_json(msg='bind mode needs to be one of "ro", "rw", "z", or "Z"') + if parts[2] not in ["rw", "rw,Z", "rw,z", "z,rw", "Z,rw", "Z", "z", "ro", "ro,Z", "ro,z", "z,ro", "Z,ro"]: + self.module.fail_json(msg='invalid bind mode ' + parts[2]) else: mode = parts[2] self.binds[parts[0]] = {'bind': parts[1], 'mode': mode } From 6e37f1dcef0e38ea6b9222cf49aa66df0e3a3c45 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 12 Nov 2015 09:39:37 -0800 Subject: [PATCH 097/200] fixed remote_src support, now actually copies and does not move --- files/copy.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/files/copy.py b/files/copy.py index da976f9a692..812b2d9ab7d 100644 --- a/files/copy.py +++ b/files/copy.py @@ -19,7 +19,7 @@ # along with Ansible. If not, see . 
import os -import time +import tempfile DOCUMENTATION = ''' --- @@ -214,7 +214,8 @@ def main(): backup = dict(default=False, type='bool'), force = dict(default=True, aliases=['thirsty'], type='bool'), validate = dict(required=False, type='str'), - directory_mode = dict(required=False) + directory_mode = dict(required=False), + remote_src = dict(required=False, type='bool'), ), add_file_common_args=True, supports_check_mode=True, @@ -228,6 +229,7 @@ def main(): validate = module.params.get('validate',None) follow = module.params['follow'] mode = module.params['mode'] + remote_src = module.params['remote_src'] if not os.path.exists(src): module.fail_json(msg="Source %s failed to transfer" % (src)) @@ -307,7 +309,12 @@ def main(): (rc,out,err) = module.run_command(validate % src) if rc != 0: module.fail_json(msg="failed to validate: rc:%s error:%s" % (rc,err)) - module.atomic_move(src, dest) + if remote_src: + tmpdest = tempfile.mkstemp(dir=os.basedir(dest)) + shutil.copy2(src, tmpdest) + module.atomic_move(tmpdest, dest) + else: + module.atomic_move(src, dest) except IOError: module.fail_json(msg="failed to copy: %s to %s" % (src, dest)) changed = True From 3193961cf5d7088cee6716f93fd642b763995fd9 Mon Sep 17 00:00:00 2001 From: Marcin Stolarek Date: Fri, 13 Nov 2015 10:45:27 +0100 Subject: [PATCH 098/200] It may be string with int comparison, if ansible user specifies identifier as int --- cloud/amazon/route53.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/route53.py b/cloud/amazon/route53.py index 9b867fb1e72..443b71be921 100644 --- a/cloud/amazon/route53.py +++ b/cloud/amazon/route53.py @@ -389,7 +389,7 @@ def main(): decoded_name = rset.name.replace(r'\052', '*') decoded_name = decoded_name.replace(r'\100', '@') - if rset.type == type_in and decoded_name.lower() == record_in.lower() and rset.identifier == identifier_in: + if rset.type == type_in and decoded_name.lower() == record_in.lower() and rset.identifier == 
str(identifier_in): found_record = True record['zone'] = zone_in record['type'] = rset.type From b6223ca729b0f4fb238eb30a31447c6a55fbca95 Mon Sep 17 00:00:00 2001 From: Marcin Stolarek Date: Fri, 13 Nov 2015 11:32:10 +0100 Subject: [PATCH 099/200] Save changes of special characters to rset, without that comparison rset.to_xml() == wanted_rset.to_xml() will fail if record contains * or @ characters. --- cloud/amazon/route53.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cloud/amazon/route53.py b/cloud/amazon/route53.py index 443b71be921..ec4dc533005 100644 --- a/cloud/amazon/route53.py +++ b/cloud/amazon/route53.py @@ -388,6 +388,8 @@ def main(): # tripping of things like * and @. decoded_name = rset.name.replace(r'\052', '*') decoded_name = decoded_name.replace(r'\100', '@') + #Need to save this changes in rset, because of comparing rset.to_xml() == wanted_rset.to_xml() in next block + rset.name = decoded_name if rset.type == type_in and decoded_name.lower() == record_in.lower() and rset.identifier == str(identifier_in): found_record = True From 572771d0b1eb6d94ea9a596b7a719d3a2d0b651b Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 13 Nov 2015 16:46:32 -0500 Subject: [PATCH 100/200] Version bump for new beta 2.0.0-0.5.beta3 --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index f802f1a2cdb..47c909bbc53 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.0.0-0.4.beta2 +2.0.0-0.5.beta3 From 21f6390fa34ca0e0a4736f4f2803b22356953d0f Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 17 Nov 2015 10:05:15 -0800 Subject: [PATCH 101/200] clarified set_fact function --- utilities/logic/set_fact.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/utilities/logic/set_fact.py b/utilities/logic/set_fact.py index f05dbf76795..3dc729d07dc 100644 --- a/utilities/logic/set_fact.py +++ b/utilities/logic/set_fact.py @@ -24,9 +24,8 @@ author: "Dag Wieers (@dagwieers)" module: set_fact 
short_description: Set host facts from a task description: - - This module allows setting new variables. Variables are set on a host-by-host basis - just like facts discovered by the setup module. - - These variables will survive between plays. + - This module allows setting new variables. Variables are set on a host-by-host basis just like facts discovered by the setup module. + - These variables will survive between plays during an Ansible run, but will not be saved across executions even if you use a fact cache. options: key_value: description: From e9c548da417f90b990aff3a2036abc52abb2bf37 Mon Sep 17 00:00:00 2001 From: Maarten Claes Date: Wed, 18 Nov 2015 13:12:59 +0100 Subject: [PATCH 102/200] This fixes copy with the remote_src option It was broken in 6e37f1dcef0 when the remote_src was added. Need to pass the absolute path to copy2 instead of a tuple. --- files/copy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/copy.py b/files/copy.py index 812b2d9ab7d..5dd1e9935e6 100644 --- a/files/copy.py +++ b/files/copy.py @@ -310,7 +310,7 @@ def main(): if rc != 0: module.fail_json(msg="failed to validate: rc:%s error:%s" % (rc,err)) if remote_src: - tmpdest = tempfile.mkstemp(dir=os.basedir(dest)) + _, tmpdest = tempfile.mkstemp(dir=os.path.dirname(dest)) shutil.copy2(src, tmpdest) module.atomic_move(tmpdest, dest) else: From 4bc834485ae648596ad443d6527d77844c2c54e5 Mon Sep 17 00:00:00 2001 From: Tom Paine Date: Wed, 18 Nov 2015 19:45:32 +0000 Subject: [PATCH 103/200] Update ec2_elb_lb.py --- cloud/amazon/ec2_elb_lb.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_elb_lb.py b/cloud/amazon/ec2_elb_lb.py index 8488b78a110..1d9b2db283e 100644 --- a/cloud/amazon/ec2_elb_lb.py +++ b/cloud/amazon/ec2_elb_lb.py @@ -107,7 +107,7 @@ options: description: - Wait a specified timeout allowing connections to drain before terminating an instance required: false - default: "no" + default: "None" aliases: [] 
version_added: "1.8" idle_timeout: From 2a306c6b8cb4beeacc06a9ba37311c96b17d3413 Mon Sep 17 00:00:00 2001 From: David Shrewsbury Date: Wed, 18 Nov 2015 14:55:17 -0500 Subject: [PATCH 104/200] Fix os_user_group module This module had a couple of errors in it. Also added check mode support. --- cloud/openstack/os_user_group.py | 51 +++++++++++++++++++------------- 1 file changed, 30 insertions(+), 21 deletions(-) diff --git a/cloud/openstack/os_user_group.py b/cloud/openstack/os_user_group.py index 37b76933c38..b2be24c74b2 100644 --- a/cloud/openstack/os_user_group.py +++ b/cloud/openstack/os_user_group.py @@ -17,7 +17,6 @@ try: import shade - from shade import meta HAS_SHADE = True except ImportError: HAS_SHADE = False @@ -28,6 +27,7 @@ module: os_user_group short_description: Associate OpenStack Identity users and groups extends_documentation_fragment: openstack version_added: "2.0" +author: "Monty Taylor (@emonty)" description: - Add and remove users from groups options: @@ -51,57 +51,66 @@ requirements: EXAMPLES = ''' # Add the demo user to the demo group -- os_user_group: user=demo group=demo +- os_user_group: + cloud: mycloud + user: demo + group: demo ''' -def main(): +def _system_state_change(state, in_group): + if state == 'present' and not in_group: + return True + if state == 'absent' and in_group: + return True + return False +def main(): argument_spec = openstack_full_argument_spec( - argument_spec = dict( user=dict(required=True), group=dict(required=True), state=dict(default='present', choices=['absent', 'present']), - )) + ) module_kwargs = openstack_module_kwargs() - module = AnsibleModule(argument_spec, **module_kwargs) + module = AnsibleModule(argument_spec, + supports_check_mode=True, + **module_kwargs) if not HAS_SHADE: module.fail_json(msg='shade is required for this module') - user = module.params.pop('user') - group = module.params.pop('group') - state = module.params.pop('state') + user = module.params['user'] + group = module.params['group'] 
+ state = module.params['state'] try: - cloud = shade.openstack_cloud(**module.params) + cloud = shade.operator_cloud(**module.params) in_group = cloud.is_user_in_group(user, group) - if state == 'present': + if module.check_mode: + module.exit_json(changed=_system_state_change(state, in_group)) - if in_group: - changed = False - else: - cloud.add_user_to_group( - user_name_or_id=user, group_name_or_id=group) + changed = False + if state == 'present': + if not in_group: + cloud.add_user_to_group(user, group) changed = True + elif state == 'absent': if in_group: - cloud.remove_user_from_group( - user_name_or_id=user, group_name_or_id=group) + cloud.remove_user_from_group(user, group) changed=True - else: - changed=False + module.exit_json(changed=changed) except shade.OpenStackCloudException as e: module.fail_json(msg=e.message, extra_data=e.extra_data) + from ansible.module_utils.basic import * from ansible.module_utils.openstack import * - if __name__ == '__main__': main() From 062c7764e63e7ff59efe80cd1bf4887eb625ae3c Mon Sep 17 00:00:00 2001 From: Mike Riddle Date: Wed, 18 Nov 2015 15:10:15 -0500 Subject: [PATCH 105/200] Fixed error message: TypeError: fail_json() takes exactly 1 argument (2 given) --- cloud/amazon/iam_policy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/iam_policy.py b/cloud/amazon/iam_policy.py index d5b3daa7d5e..7038612d88e 100644 --- a/cloud/amazon/iam_policy.py +++ b/cloud/amazon/iam_policy.py @@ -188,7 +188,7 @@ def role_action(module, iam, name, policy_name, skip, pdoc, state): # Role doesn't exist so it's safe to assume the policy doesn't either module.exit_json(changed=False) else: - module.fail_json(e.message) + module.fail_json(msg=e.message) try: for pol in current_policies: From 1d6b31a90f8569029afea5dc8e459529b8c976fb Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 19 Nov 2015 09:28:40 -0800 Subject: [PATCH 106/200] fixed break order --- cloud/amazon/ec2_elb.py | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_elb.py b/cloud/amazon/ec2_elb.py index 5b3b24dacc2..4e19a054bd1 100644 --- a/cloud/amazon/ec2_elb.py +++ b/cloud/amazon/ec2_elb.py @@ -266,9 +266,9 @@ class ElbManager: try: newelbs = elb.get_all_load_balancers(marker=marker) marker = newelbs.next_marker + elbs.extend(newelbs) if not marker: break - elbs.extend(newelbs) except TypeError: # Older version of boto do not allow for params elbs = elb.get_all_load_balancers() From ade721cc5d3383dd78278041429db30f004a8e02 Mon Sep 17 00:00:00 2001 From: Chris Church Date: Wed, 18 Nov 2015 17:52:05 -0500 Subject: [PATCH 107/200] Remove note about only using win_copy for small files. --- windows/win_copy.py | 10 ---------- 1 file changed, 10 deletions(-) mode change 100644 => 100755 windows/win_copy.py diff --git a/windows/win_copy.py b/windows/win_copy.py old mode 100644 new mode 100755 index acc6c9ef2e0..a222a928f09 --- a/windows/win_copy.py +++ b/windows/win_copy.py @@ -44,16 +44,6 @@ options: required: true default: null author: "Jon Hawkesworth (@jhawkesworth)" -notes: - - The "win_copy" module is best used for small files only. - This module should **not** be used for files bigger than 3Mb as - this will result in a 500 response from the winrm host - and it will not be possible to connect via winrm again until the - windows remote management service has been restarted on the - windows host. - Files larger than 1Mb will take minutes to transfer. - The recommended way to transfer large files is using win_get_url - or collecting from a windows file share folder. 
''' EXAMPLES = ''' From 13343a88881e5ac174213a8227c263c4d18e6c95 Mon Sep 17 00:00:00 2001 From: Chris Streeter Date: Thu, 19 Nov 2015 14:00:35 -0800 Subject: [PATCH 108/200] Fix name of ssh_opts arg --- source_control/git.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source_control/git.py b/source_control/git.py index d42b284abc5..bdc87b034a2 100644 --- a/source_control/git.py +++ b/source_control/git.py @@ -55,7 +55,7 @@ options: version_added: "1.5" description: - if C(yes), adds the hostkey for the repo url if not already - added. If ssh_args contains "-o StrictHostKeyChecking=no", + added. If ssh_opts contains "-o StrictHostKeyChecking=no", this parameter is ignored. ssh_opts: required: false From 3b8147af3044ed9c5d628e2f92e4795688f742c2 Mon Sep 17 00:00:00 2001 From: Daniel Donckers Date: Fri, 20 Nov 2015 14:55:39 -0600 Subject: [PATCH 109/200] Fixes #822 --- cloud/amazon/route53.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/amazon/route53.py b/cloud/amazon/route53.py index ec4dc533005..72eac85b4fc 100644 --- a/cloud/amazon/route53.py +++ b/cloud/amazon/route53.py @@ -295,7 +295,7 @@ def main(): overwrite = dict(required=False, type='bool'), retry_interval = dict(required=False, default=500), private_zone = dict(required=False, type='bool', default=False), - identifier = dict(required=False), + identifier = dict(required=False, default=None), weight = dict(required=False, type='int'), region = dict(required=False), health_check = dict(required=False), @@ -391,7 +391,7 @@ def main(): #Need to save this changes in rset, because of comparing rset.to_xml() == wanted_rset.to_xml() in next block rset.name = decoded_name - if rset.type == type_in and decoded_name.lower() == record_in.lower() and rset.identifier == str(identifier_in): + if rset.type == type_in and decoded_name.lower() == record_in.lower() and str(rset.identifier) == str(identifier_in): found_record = True record['zone'] = zone_in record['type'] = 
rset.type From 39ef6a1a80d0bc0c3c2b58d39fb8b59959baa17d Mon Sep 17 00:00:00 2001 From: Keith Hassen Date: Sun, 22 Nov 2015 21:53:21 -0500 Subject: [PATCH 110/200] Fail if any group name is not resolved to an ID. --- cloud/amazon/ec2.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index ac2d58064be..b14c3c7d961 100644 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -883,6 +883,9 @@ def create_instances(module, ec2, vpc, override_count=None): grp_details = ec2.get_all_security_groups() if isinstance(group_name, basestring): group_name = [group_name] + unmatched = list(set(group_name) - set([str(grp.name) for grp in grp_details])) + if len(unmatched) > 0: + module.fail_json(msg="the following group names are not valid: %s" % ','.join(unmatched)) group_id = [ str(grp.id) for grp in grp_details if str(grp.name) in group_name ] # Now we try to lookup the group id testing if group exists. elif group_id: From 1bc0b6ee6a1f70a2e29c33e1f77414b19e2fb126 Mon Sep 17 00:00:00 2001 From: Charles Paul Date: Mon, 23 Nov 2015 22:51:08 +0900 Subject: [PATCH 111/200] create non-existent ini file fixing fail_json more verbose fail msg --- files/ini_file.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/files/ini_file.py b/files/ini_file.py index ce286741981..82d4621dfbb 100644 --- a/files/ini_file.py +++ b/files/ini_file.py @@ -97,6 +97,7 @@ EXAMPLES = ''' import ConfigParser import sys +import os # ============================================================== # do_ini @@ -104,6 +105,11 @@ import sys def do_ini(module, filename, section=None, option=None, value=None, state='present', backup=False): + if not os.path.exists(filename): + try: + open(filename,'w').close() + except: + module.fail_json(msg="Destination file %s not writable" % filename) ini_file = open(filename, 'r') try: ini_lines = ini_file.readlines() From d13741314ea3fceaef4ef69eab6b1a7b4da9e901 Mon Sep 17 00:00:00 2001 From: David Shrewsbury Date: Mon, 23 
Nov 2015 11:29:00 -0500 Subject: [PATCH 112/200] Bug fix for os_image and min_disk/min_ram The min_disk and min_ram parameters were not being passed to the shade API. They also need to be integer values. Also updated the description of these parameters for better clarification. --- cloud/openstack/os_image.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/cloud/openstack/os_image.py b/cloud/openstack/os_image.py index 076ea806396..b83e98859e7 100644 --- a/cloud/openstack/os_image.py +++ b/cloud/openstack/os_image.py @@ -56,12 +56,12 @@ options: default: None min_disk: description: - - The minimum disk space required to deploy this image + - The minimum disk space (in GB) required to boot this image required: false default: None min_ram: description: - - The minimum ram required to deploy this image + - The minimum ram (in MB) required to boot this image required: false default: None is_public: @@ -125,8 +125,8 @@ def main(): disk_format = dict(default='qcow2', choices=['ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw', 'qcow2', 'vdi', 'iso']), container_format = dict(default='bare', choices=['ami', 'aki', 'ari', 'bare', 'ovf', 'ova']), owner = dict(default=None), - min_disk = dict(default=None), - min_ram = dict(default=None), + min_disk = dict(type='int', default=0), + min_ram = dict(type='int', default=0), is_public = dict(default=False), filename = dict(default=None), ramdisk = dict(default=None), @@ -156,6 +156,8 @@ def main(): wait=module.params['wait'], timeout=module.params['timeout'], is_public=module.params['is_public'], + min_disk=module.params['min_disk'], + min_ram=module.params['min_ram'] ) changed = True if not module.params['wait']: From 6bd8020f65928cda1dbebf8594d1ead8f439b9f2 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 24 Nov 2015 09:32:46 -0800 Subject: [PATCH 113/200] corrected version_added, removed empty alias --- cloud/amazon/ec2.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git 
a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index eaaec90002a..cfd4991f526 100644 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -248,12 +248,11 @@ options: default: null aliases: ['network_interface'] spot_launch_group: - version_added: "2.0" + version_added: "2.1" description: - Launch group for spot request, see U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/how-spot-instances-work.html#spot-launch-group) required: false default: null - aliases: [] author: - "Tim Gerla (@tgerla)" From 490038b0e4a9db324bff9e5cc69e790c0ab52bb2 Mon Sep 17 00:00:00 2001 From: Charles Ferguson Date: Mon, 23 Nov 2015 23:42:40 +0000 Subject: [PATCH 114/200] Update documentation of the 'pkg' and 'state' parameters in yum. The yum module allows the 'name' parameter to be given as 'pkg', in a similar way to some of the other package managers. This change documents this alias. The module's 'state' parameter has two other aliases, in line with the 'apt' action; the 'state' parameter can take 'installed' as an alias for 'present', and 'removed' as an alias for 'absent'. These aliases are documented. --- packaging/os/yum.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/packaging/os/yum.py b/packaging/os/yum.py index e1e3341a075..e0b598a410c 100644 --- a/packaging/os/yum.py +++ b/packaging/os/yum.py @@ -51,7 +51,7 @@ options: - "Package name, or package specifier with version, like C(name-1.0). When using state=latest, this can be '*' which means run: yum -y update. You can also pass a url or a local path to a rpm file. To operate on several packages this can accept a comma separated list of packages or (as of 2.0) a list of packages." required: true default: null - aliases: [] + aliases: [ 'pkg' ] exclude: description: - "Package name(s) to exclude when state=present, or latest" @@ -65,9 +65,9 @@ options: default: null state: description: - - Whether to install (C(present), C(latest)), or remove (C(absent)) a package. 
+ - Whether to install (C(present) or C(installed), C(latest)), or remove (C(absent) or C(removed)) a package. required: false - choices: [ "present", "latest", "absent" ] + choices: [ "present", "installed", "latest", "absent", "removed" ] default: "present" enablerepo: description: From 8ae30f1822cb73ac54eb9e19d922d605a78ed098 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 23 Nov 2015 16:23:41 -0800 Subject: [PATCH 115/200] Minor simplification of code --- cloud/amazon/ec2.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index 6dcc97bc5c8..e035e07af24 100644 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -891,9 +891,9 @@ def create_instances(module, ec2, vpc, override_count=None): grp_details = ec2.get_all_security_groups() if isinstance(group_name, basestring): group_name = [group_name] - unmatched = list(set(group_name) - set([str(grp.name) for grp in grp_details])) + unmatched = set(group_name).difference(str(grp.name) for grp in grp_details) if len(unmatched) > 0: - module.fail_json(msg="the following group names are not valid: %s" % ','.join(unmatched)) + module.fail_json(msg="The following group names are not valid: %s" % ', '.join(unmatched)) group_id = [ str(grp.id) for grp in grp_details if str(grp.name) in group_name ] # Now we try to lookup the group id testing if group exists. elif group_id: From fca36415d6df5f5877a2db72d3f97056ba9e2f65 Mon Sep 17 00:00:00 2001 From: Charles Ferguson Date: Tue, 24 Nov 2015 15:55:31 +0000 Subject: [PATCH 116/200] Update the documentation of the 'apt' action for the 'name'. The package name has two aliases, 'package' and 'pkg'. Add them to the documentation. 
--- packaging/os/apt.py | 1 + 1 file changed, 1 insertion(+) diff --git a/packaging/os/apt.py b/packaging/os/apt.py index b5c363ab1f5..3fe9c62c07d 100755 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -32,6 +32,7 @@ options: - A package name, like C(foo), or package specifier with version, like C(foo=1.0). Name wildcards (fnmatch) like C(apt*) and version wildcards like C(foo=1.0*) are also supported. Note that the apt-get commandline supports implicit regex matches here but we do not because it can let typos through easier (If you typo C(foo) as C(fo) apt-get would install packages that have "fo" in their name with a warning and a prompt for the user. Since we don't have warnings and prompts before installing we disallow this. Use an explicit fnmatch pattern if you want wildcarding) required: false default: null + aliases: [ 'pkg', 'package' ] state: description: - Indicates the desired package state. C(latest) ensures that the latest version is installed. C(build-dep) ensures the package build dependencies are installed. 
From ab420300efa1cba981cb39049a9ca56ccc303420 Mon Sep 17 00:00:00 2001 From: joshuaeke Date: Tue, 24 Nov 2015 18:48:59 +0000 Subject: [PATCH 117/200] Update ec2.py remove state tag 'exact_count' and 'state' are mutually exclusive options they should not be in the following examples: - # Enforce that 5 running instances named "database" with a "dbtype" of "postgres" example and - # Enforce that 5 instances with a tag "foo" are running --- cloud/amazon/ec2.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index e035e07af24..04aa656d37e 100644 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -488,7 +488,6 @@ EXAMPLES = ''' # - ec2: - state: running key_name: mykey instance_type: c1.medium image: ami-40603AD1 @@ -506,7 +505,6 @@ EXAMPLES = ''' # - ec2: - state: running key_name: mykey instance_type: c1.medium image: ami-40603AD1 From f1f201c234d91e186e56d4cc53fbb018d905c0a4 Mon Sep 17 00:00:00 2001 From: Max Rothman Date: Tue, 24 Nov 2015 14:26:17 -0500 Subject: [PATCH 118/200] Fix rds "promote" command never promoting Previously, the `promote` command in the `rds` module would always return OK and never actually promote an instance. This was because `promote_db_instance()` had its conditions backwards: if the instance had the `replication_source` attribute indicating that it **was** a replica, it would set `changed = False` and do nothing. If the instance **wasn't** a replica, it would attempt to run `boto.rds.promote_read_replica()`, which would always fail. 
--- cloud/amazon/rds.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/cloud/amazon/rds.py b/cloud/amazon/rds.py index d8f5a2cea86..19f0bbe58d6 100644 --- a/cloud/amazon/rds.py +++ b/cloud/amazon/rds.py @@ -829,13 +829,17 @@ def promote_db_instance(module, conn): instance_name = module.params.get('instance_name') result = conn.get_db_instance(instance_name) + if not result: + module.fail_json(msg="DB Instance %s does not exist" % instance_name) + if result.get_data().get('replication_source'): - changed = False - else: try: result = conn.promote_read_replica(instance_name, **params) + changed = True except RDSException, e: module.fail_json(msg=e.message) + else: + changed = False if module.params.get('wait'): resource = await_resource(conn, result, 'available', module) From 129bac3649c65137f3a821309de05dc85dae1dc3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Martin=20Andr=C3=A9?= Date: Wed, 25 Nov 2015 16:54:11 +0900 Subject: [PATCH 119/200] Fix typo in ping module short description --- system/ping.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/ping.py b/system/ping.py index 1449cf5dca9..ed93f7dfe11 100644 --- a/system/ping.py +++ b/system/ping.py @@ -23,7 +23,7 @@ DOCUMENTATION = ''' --- module: ping version_added: historical -short_description: Try to connect to host, veryify a usable python and return C(pong) on success. +short_description: Try to connect to host, verify a usable python and return C(pong) on success. description: - A trivial test module, this module always returns C(pong) on successful contact. 
It does not make sense in playbooks, but it is useful from From b07ff99cace891084d3e978ddfd1953d0145cc0e Mon Sep 17 00:00:00 2001 From: Michel Alexandre Salim Date: Wed, 25 Nov 2015 15:08:48 +0700 Subject: [PATCH 120/200] Fix ec2_snapshot documentation last_snapshot_min_age is added in 2.0, not 1.9 --- cloud/amazon/ec2_snapshot.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_snapshot.py b/cloud/amazon/ec2_snapshot.py index 09fa0d90389..a3200efe847 100644 --- a/cloud/amazon/ec2_snapshot.py +++ b/cloud/amazon/ec2_snapshot.py @@ -74,7 +74,7 @@ options: - If the volume's most recent snapshot has started less than `last_snapshot_min_age' minutes ago, a new snapshot will not be created. required: false default: 0 - version_added: "1.9" + version_added: "2.0" author: "Will Thames (@willthames)" extends_documentation_fragment: From b2ace272a538bf6cd9cc9c2f3a43120438d55b2a Mon Sep 17 00:00:00 2001 From: Lippy Lee Date: Wed, 25 Nov 2015 22:10:31 +0800 Subject: [PATCH 121/200] Make digital_ocean_domain use API v2 --- cloud/digital_ocean/digital_ocean_domain.py | 30 +++++++++------------ 1 file changed, 12 insertions(+), 18 deletions(-) diff --git a/cloud/digital_ocean/digital_ocean_domain.py b/cloud/digital_ocean/digital_ocean_domain.py index 3b7a2dce236..d44c4d71134 100644 --- a/cloud/digital_ocean/digital_ocean_domain.py +++ b/cloud/digital_ocean/digital_ocean_domain.py @@ -29,12 +29,9 @@ options: - Indicate desired state of the target. default: present choices: ['present', 'absent'] - client_id: - description: - - DigitalOcean manager id. - api_key: + api_token: description: - - DigitalOcean api key. + - DigitalOcean api token. id: description: - Numeric, the droplet id you want to operate on. @@ -46,8 +43,8 @@ options: - The IP address to point a domain at. notes: - - Two environment variables can be used, DO_CLIENT_ID and DO_API_KEY. - - Version 1 of DigitalOcean API is used. 
+ - Two environment variables can be used, DO_API_KEY and DO_API_TOKEN. They both refer to the v2 token. + - Version 2 of DigitalOcean API is used. requirements: - "python >= 2.6" @@ -68,9 +65,9 @@ EXAMPLES = ''' - digital_ocean: > state=present name=test_droplet - size_id=1 - region_id=2 - image_id=3 + size_id=1gb + region_id=sgp1 + image_id=ubuntu-14-04-x64 register: test_droplet - digital_ocean_domain: > @@ -135,8 +132,8 @@ class Domain(JsonfyMixIn): return cls(json) @classmethod - def setup(cls, client_id, api_key): - cls.manager = DoManager(client_id, api_key) + def setup(cls, api_token): + cls.manager = DoManager(None, api_token, api_version=2) DomainRecord.manager = cls.manager @classmethod @@ -171,16 +168,14 @@ def core(module): return v try: - # params['client_id'] will be None even if client_id is not passed in - client_id = module.params['client_id'] or os.environ['DO_CLIENT_ID'] - api_key = module.params['api_key'] or os.environ['DO_API_KEY'] + api_token = module.params['api_token'] or os.environ['DO_API_TOKEN'] or os.environ['DO_API_KEY'] except KeyError, e: module.fail_json(msg='Unable to load %s' % e.message) changed = True state = module.params['state'] - Domain.setup(client_id, api_key) + Domain.setup(api_token) if state in ('present'): domain = Domain.find(id=module.params["id"]) @@ -223,8 +218,7 @@ def main(): module = AnsibleModule( argument_spec = dict( state = dict(choices=['present', 'absent'], default='present'), - client_id = dict(aliases=['CLIENT_ID'], no_log=True), - api_key = dict(aliases=['API_KEY'], no_log=True), + api_token = dict(aliases=['API_TOKEN'], no_log=True), name = dict(type='str'), id = dict(aliases=['droplet_id'], type='int'), ip = dict(type='str'), From c428483b244123d3cc2d57ba1d7636d119ee5536 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 26 Nov 2015 09:41:33 -0800 Subject: [PATCH 122/200] updated docs to denote retirement of v1 api and clarify when and why auth fields have changed --- 
cloud/digital_ocean/digital_ocean_domain.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cloud/digital_ocean/digital_ocean_domain.py b/cloud/digital_ocean/digital_ocean_domain.py index d44c4d71134..70d7e300df3 100644 --- a/cloud/digital_ocean/digital_ocean_domain.py +++ b/cloud/digital_ocean/digital_ocean_domain.py @@ -32,6 +32,7 @@ options: api_token: description: - DigitalOcean api token. + version_added: "1.9.5" id: description: - Numeric, the droplet id you want to operate on. @@ -44,7 +45,8 @@ options: notes: - Two environment variables can be used, DO_API_KEY and DO_API_TOKEN. They both refer to the v2 token. - - Version 2 of DigitalOcean API is used. + - As of Ansible 1.9.5 and 2.0, Version 2 of the DigitalOcean API is used, this removes C(client_id) and C(api_key) options in favor of C(api_token). + - If you are running Ansible 1.9.4 or earlier you might not be able to use the included version of this module as the API version used has been retired. requirements: - "python >= 2.6" From cab97cd2d9ca0931e71d5eb17557e2a78b701cc5 Mon Sep 17 00:00:00 2001 From: Jay Rogers Date: Wed, 25 Nov 2015 18:46:17 -0600 Subject: [PATCH 123/200] Update in Amazon IAM Policy Documentation There were typos in the documentation that made the examples seem misleading of what was being demonstrated. This update fixes that. 
--- cloud/amazon/iam_policy.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cloud/amazon/iam_policy.py b/cloud/amazon/iam_policy.py index 7038612d88e..44a708c9a66 100644 --- a/cloud/amazon/iam_policy.py +++ b/cloud/amazon/iam_policy.py @@ -64,9 +64,9 @@ extends_documentation_fragment: ''' EXAMPLES = ''' -# Create and policy with the name of 'Admin' to the group 'administrators' +# Create a policy with the name of 'Admin' to the group 'administrators' tasks: -- name: Create two new IAM users with API keys +- name: Assign a policy called Admin to the administrators group iam_policy: iam_type: group iam_name: administrators @@ -87,7 +87,7 @@ task: - Luigi register: new_groups -- name: +- name: Apply READ-ONLY policy to new groups that have been recently created iam_policy: iam_type: group iam_name: "{{ item.created_group.group_name }}" From 9c30ef8926b869e6f5b16c1a484dfbcb1b1ccb3a Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 25 Nov 2015 23:48:02 -0800 Subject: [PATCH 124/200] doc updates - added version_added to new api_token - updated notes to explain API issues, option switch and versions affected. --- cloud/digital_ocean/digital_ocean.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/cloud/digital_ocean/digital_ocean.py b/cloud/digital_ocean/digital_ocean.py index d7b55bee693..3b2d1a16484 100644 --- a/cloud/digital_ocean/digital_ocean.py +++ b/cloud/digital_ocean/digital_ocean.py @@ -37,6 +37,7 @@ options: api_token: description: - DigitalOcean api token. + version_added: "1.9.5" id: description: - Numeric, the droplet id you want to operate on. @@ -100,8 +101,9 @@ options: notes: - Two environment variables can be used, DO_API_KEY and DO_API_TOKEN. They both refer to the v2 token. - - As of Ansible 2.0, Version 2 of the DigitalOcean API is used. - - As of Ansible 2.0, the above parameters were changed significantly. 
If you are running 1.9.x or earlier, please use C(ansible-doc digital_ocean) to view the correct parameters for your version. Dedicated web docs will be available in the near future for the stable branch. + - As of Ansible 1.9.5 and 2.0, Version 2 of the DigitalOcean API is used, this removes C(client_id) and C(api_key) options in favor of C(api_token). + - If you are running Ansible 1.9.4 or earlier you might not be able to use the included version of this module as the API version used has been retired. + Upgrade Ansible or, if unable to, try downloading the latest version of this module from github and putting it into a 'library' directory. requirements: - "python >= 2.6" - dopy From 3d14397dc0a92f6db9e0b8a242ce7308f06ed5ee Mon Sep 17 00:00:00 2001 From: = Date: Thu, 26 Nov 2015 08:58:34 +0000 Subject: [PATCH 125/200] Fix for 13315 - minute now included in ansible_date_time on windows hosts --- windows/setup.ps1 | 1 + 1 file changed, 1 insertion(+) diff --git a/windows/setup.ps1 b/windows/setup.ps1 index 4d163c7ec26..b31e76684b4 100644 --- a/windows/setup.ps1 +++ b/windows/setup.ps1 @@ -68,6 +68,7 @@ Set-Attr $date "year" (Get-Date -format yyyy) Set-Attr $date "month" (Get-Date -format MM) Set-Attr $date "day" (Get-Date -format dd) Set-Attr $date "hour" (Get-Date -format HH) +Set-Attr $date "minute" (Get-Date -format mm) Set-Attr $date "iso8601" (Get-Date -format s) Set-Attr $result.ansible_facts "ansible_date_time" $date From 32edc2f56f27ee5859c5657ec69e14b566b4da07 Mon Sep 17 00:00:00 2001 From: Markus Suonto Date: Thu, 26 Nov 2015 15:34:47 +0200 Subject: [PATCH 126/200] fixed quantum_ modules to work with minimum access rights if greater access rights are not needed --- cloud/openstack/_quantum_network.py | 11 +++++------ cloud/openstack/_quantum_router.py | 15 +++++++-------- cloud/openstack/_quantum_router_interface.py | 13 ++++++------- cloud/openstack/_quantum_subnet.py | 12 ++++++------ 4 files changed, 24 insertions(+), 27 deletions(-) diff --git 
a/cloud/openstack/_quantum_network.py b/cloud/openstack/_quantum_network.py index 93b10880823..a0a29e6a062 100644 --- a/cloud/openstack/_quantum_network.py +++ b/cloud/openstack/_quantum_network.py @@ -164,18 +164,17 @@ def _get_neutron_client(module, kwargs): def _set_tenant_id(module): global _os_tenant_id if not module.params['tenant_name']: - tenant_name = module.params['login_tenant_name'] + _os_tenant_id = _os_keystone.tenant_id else: tenant_name = module.params['tenant_name'] - for tenant in _os_keystone.tenants.list(): - if tenant.name == tenant_name: - _os_tenant_id = tenant.id - break + for tenant in _os_keystone.tenants.list(): + if tenant.name == tenant_name: + _os_tenant_id = tenant.id + break if not _os_tenant_id: module.fail_json(msg = "The tenant id cannot be found, please check the parameters") - def _get_net_id(neutron, module): kwargs = { 'tenant_id': _os_tenant_id, diff --git a/cloud/openstack/_quantum_router.py b/cloud/openstack/_quantum_router.py index 252e1618d90..0c4d2063017 100644 --- a/cloud/openstack/_quantum_router.py +++ b/cloud/openstack/_quantum_router.py @@ -136,17 +136,16 @@ def _get_neutron_client(module, kwargs): def _set_tenant_id(module): global _os_tenant_id if not module.params['tenant_name']: - login_tenant_name = module.params['login_tenant_name'] + _os_tenant_id = _os_keystone.tenant_id else: - login_tenant_name = module.params['tenant_name'] + tenant_name = module.params['tenant_name'] - for tenant in _os_keystone.tenants.list(): - if tenant.name == login_tenant_name: - _os_tenant_id = tenant.id - break + for tenant in _os_keystone.tenants.list(): + if tenant.name == tenant_name: + _os_tenant_id = tenant.id + break if not _os_tenant_id: - module.fail_json(msg = "The tenant id cannot be found, please check the parameters") - + module.fail_json(msg = "The tenant id cannot be found, please check the parameters") def _get_router_id(module, neutron): kwargs = { diff --git a/cloud/openstack/_quantum_router_interface.py 
b/cloud/openstack/_quantum_router_interface.py index 4073c7d3b10..c936e98ad65 100644 --- a/cloud/openstack/_quantum_router_interface.py +++ b/cloud/openstack/_quantum_router_interface.py @@ -138,18 +138,17 @@ def _get_neutron_client(module, kwargs): def _set_tenant_id(module): global _os_tenant_id if not module.params['tenant_name']: - login_tenant_name = module.params['login_tenant_name'] + _os_tenant_id = _os_keystone.tenant_id else: - login_tenant_name = module.params['tenant_name'] + tenant_name = module.params['tenant_name'] - for tenant in _os_keystone.tenants.list(): - if tenant.name == login_tenant_name: - _os_tenant_id = tenant.id - break + for tenant in _os_keystone.tenants.list(): + if tenant.name == tenant_name: + _os_tenant_id = tenant.id + break if not _os_tenant_id: module.fail_json(msg = "The tenant id cannot be found, please check the parameters") - def _get_router_id(module, neutron): kwargs = { 'name': module.params['router_name'], diff --git a/cloud/openstack/_quantum_subnet.py b/cloud/openstack/_quantum_subnet.py index 105ca32c582..f2f125f64c8 100644 --- a/cloud/openstack/_quantum_subnet.py +++ b/cloud/openstack/_quantum_subnet.py @@ -170,16 +170,16 @@ def _get_neutron_client(module, kwargs): def _set_tenant_id(module): global _os_tenant_id if not module.params['tenant_name']: - tenant_name = module.params['login_tenant_name'] + _os_tenant_id = _os_keystone.tenant_id else: tenant_name = module.params['tenant_name'] - for tenant in _os_keystone.tenants.list(): - if tenant.name == tenant_name: - _os_tenant_id = tenant.id - break + for tenant in _os_keystone.tenants.list(): + if tenant.name == tenant_name: + _os_tenant_id = tenant.id + break if not _os_tenant_id: - module.fail_json(msg = "The tenant id cannot be found, please check the parameters") + module.fail_json(msg = "The tenant id cannot be found, please check the parameters") def _get_net_id(neutron, module): kwargs = { From 19abe233fed3cb04ec3344f549c48f8ea661aeab Mon Sep 17 00:00:00 2001 
From: "Veaceslav (Slava) Mindru" Date: Thu, 26 Nov 2015 08:48:42 -0500 Subject: [PATCH 127/200] Squashed commit of the following: commit 406214fad214359fcf13fe8c7cd3f8f8faac5386 commit 85d1c9b0a41dd075eb2683b1a7de595ca3119614 commit 4aa5049b5ae25dee71a248238201611a466a13c4 commit 65a96974c80aea1fef88d78e218ecb665d8113e1 commit 22ea5863d1dfd628735b46cc7de51c0fd33251de Refactoring --- system/authorized_key.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/system/authorized_key.py b/system/authorized_key.py index 8a97722b222..55c1ec432ca 100644 --- a/system/authorized_key.py +++ b/system/authorized_key.py @@ -80,6 +80,15 @@ options: choices: [ "yes", "no" ] default: "no" version_added: "1.9" + validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only + set to C(no) used on personally controlled sites using self-signed + certificates. Prior to 2.0 the code defaulted to C(yes). + required: false + default: "yes" + choices: ["yes", "no"] + version_added: "2.0" description: - "Adds or removes authorized keys for particular user accounts" author: "Ansible Core Team" @@ -111,6 +120,11 @@ EXAMPLES = ''' key="{{ lookup('file', '/home/charlie/.ssh/id_rsa.pub') }}" key_options='no-port-forwarding,from="10.0.1.1"' +# Using validate_certs: +- authorized_key: user=charlie + key=https://github.com/user.keys + validate_certs=no + # Set up authorized_keys exclusively with one key - authorized_key: user=root key="{{ item }}" state=present exclusive=yes @@ -358,6 +372,7 @@ def enforce_state(module, params): state = params.get("state", "present") key_options = params.get("key_options", None) exclusive = params.get("exclusive", False) + validate_certs = params.get("validate_certs", True) error_msg = "Error getting key from: %s" # if the key is a url, request it and use it as key source @@ -460,6 +475,7 @@ def main(): key_options = dict(required=False, type='str'), unique = dict(default=False, type='bool'), exclusive = 
dict(default=False, type='bool'), + validate_certs = dict(default=True, type='bool'), ), supports_check_mode=True ) From 9325c0ae5fde9bb035e7f03017b8ce8bcdd635a5 Mon Sep 17 00:00:00 2001 From: Charles Ferguson Date: Thu, 26 Nov 2015 15:40:09 +0000 Subject: [PATCH 128/200] Update documentation for 'file' module to include 'diff_peek'. The 'diff_peek' option isn't documented at all, and provides a rudimentary check that the content isn't binary. Documentation is added to explain the option. The 'validate' option has a declaration, but isn't implemented. Therefore it may as well be removed from the module. --- files/file.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/files/file.py b/files/file.py index 8219990d1f6..9e79d4acc61 100644 --- a/files/file.py +++ b/files/file.py @@ -87,6 +87,12 @@ options: - 'force the creation of the symlinks in two cases: the source file does not exist (but will appear later); the destination exists and is a file (so, we need to unlink the "path" file and create symlink to the "src" file in place of it).' + diff_peek: + required: false + description: + - "Only check whether the file looks like binary. Returns with the parameter + 'appears_binary' set to True or False depending on the initial content of the + file. This option is enabled when the option is set (to any value)." 
''' EXAMPLES = ''' @@ -158,7 +164,6 @@ def main(): recurse = dict(default=False, type='bool'), force = dict(required=False, default=False, type='bool'), diff_peek = dict(default=None), - validate = dict(required=False, default=None), src = dict(required=False, default=None), ), add_file_common_args=True, From 660b47f62d0c528ab55b94f6f3aab70b6e863caf Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 27 Nov 2015 09:28:50 -0800 Subject: [PATCH 129/200] minor doc fixes and reformating updated validate_certs feature to be 2.1 --- system/authorized_key.py | 36 +++++++++++++++++------------------- 1 file changed, 17 insertions(+), 19 deletions(-) diff --git a/system/authorized_key.py b/system/authorized_key.py index 55c1ec432ca..3c8fb5791e5 100644 --- a/system/authorized_key.py +++ b/system/authorized_key.py @@ -81,14 +81,14 @@ options: default: "no" version_added: "1.9" validate_certs: - description: - - If C(no), SSL certificates will not be validated. This should only - set to C(no) used on personally controlled sites using self-signed - certificates. Prior to 2.0 the code defaulted to C(yes). + description: + - This only applies if using a https url as the source of the keys. If set to C(no), the SSL certificates will not be validated. + - This should only set to C(no) used on personally controlled sites using self-signed certificates as it avoids verifying the source site. + - Prior to 2.1 the code worked as if this was set to C(yes). 
required: false default: "yes" choices: ["yes", "no"] - version_added: "2.0" + version_added: "2.1" description: - "Adds or removes authorized keys for particular user accounts" author: "Ansible Core Team" @@ -102,32 +102,30 @@ EXAMPLES = ''' - authorized_key: user=charlie key=https://github.com/charlie.keys # Using alternate directory locations: -- authorized_key: user=charlie - key="{{ lookup('file', '/home/charlie/.ssh/id_rsa.pub') }}" - path='/etc/ssh/authorized_keys/charlie' - manage_dir=no +- authorized_key: + user: charlie + key: "{{ lookup('file', '/home/charlie/.ssh/id_rsa.pub') }}" + path: '/etc/ssh/authorized_keys/charlie' + manage_dir: no # Using with_file - name: Set up authorized_keys for the deploy user - authorized_key: user=deploy - key="{{ item }}" + authorized_key: user=deploy key="{{ item }}" with_file: - public_keys/doe-jane - public_keys/doe-john # Using key_options: -- authorized_key: user=charlie - key="{{ lookup('file', '/home/charlie/.ssh/id_rsa.pub') }}" - key_options='no-port-forwarding,from="10.0.1.1"' +- authorized_key: + user: charlie + key: "{{ lookup('file', '/home/charlie/.ssh/id_rsa.pub') }}" + key_options: 'no-port-forwarding,from="10.0.1.1"' # Using validate_certs: -- authorized_key: user=charlie - key=https://github.com/user.keys - validate_certs=no +- authorized_key: user=charlie key=https://github.com/user.keys validate_certs=no # Set up authorized_keys exclusively with one key -- authorized_key: user=root key="{{ item }}" state=present - exclusive=yes +- authorized_key: user=root key="{{ item }}" state=present exclusive=yes with_file: - public_keys/doe-jane ''' From 794911c592345c8343a38f7f90cc9ed2d727fb2f Mon Sep 17 00:00:00 2001 From: luto Date: Thu, 26 Nov 2015 13:32:27 +0100 Subject: [PATCH 130/200] vsphere_guest: support putting a guest into a nested folder --- cloud/vmware/vsphere_guest.py | 62 ++++++++++++++++++++++++++++++++--- 1 file changed, 57 insertions(+), 5 deletions(-) diff --git 
a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index a14f807e049..fdfbac5876d 100644 --- a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -175,6 +175,10 @@ EXAMPLES = ''' size_gb: 10 type: thin datastore: storage001 + # VMs can be put into folders. The value given here is either the full path + # to the folder (e.g. production/customerA/lamp) or just the last component + # of the path (e.g. lamp): + folder: production/customerA/lamp vm_nic: nic1: type: vmxnet3 @@ -915,6 +919,48 @@ def reconfigure_net(vsphere_client, vm, module, esxi, resource_pool, guest, vm_n elif len(nics) == 0: return(False) + +def _build_folder_tree(nodes, parent): + tree = {} + + for node in nodes: + if node['parent'] == parent: + tree[node['name']] = dict.copy(node) + tree[node['name']]['subfolders'] = _build_folder_tree(nodes, node['id']) + del tree[node['name']]['parent'] + + return tree + + +def _find_path_in_tree(tree, path): + for name, o in tree.iteritems(): + if name == path[0]: + if len(path) == 1: + return o + else: + return _find_path_in_tree(o['subfolders'], path[1:]) + + return None + + +def _get_folderid_for_path(vsphere_client, datacenter, path): + content = vsphere_client._retrieve_properties_traversal(property_names=['name', 'parent'], obj_type=MORTypes.Folder) + if not content: return {} + + node_list = [ + { + 'id': o.Obj, + 'name': o.PropSet[0].Val, + 'parent': (o.PropSet[1].Val if len(o.PropSet) > 1 else None) + } for o in content + ] + + tree = _build_folder_tree(node_list, datacenter) + tree = _find_path_in_tree(tree, ['vm'])['subfolders'] + folder = _find_path_in_tree(tree, path.split('/')) + return folder['id'] if folder else None + + def create_vm(vsphere_client, module, esxi, resource_pool, cluster_name, guest, vm_extra_config, vm_hardware, vm_disk, vm_nic, vm_hw_version, state): datacenter = esxi['datacenter'] @@ -935,13 +981,19 @@ def create_vm(vsphere_client, module, esxi, resource_pool, cluster_name, guest, # 
virtualmachineFolder managed object reference if vm_extra_config.get('folder'): - if vm_extra_config['folder'] not in vsphere_client._get_managed_objects(MORTypes.Folder).values(): + # try to find the folder by its full path, e.g. 'production/customerA/lamp' + vmfmor = _get_folderid_for_path(vsphere_client, dcmor, vm_extra_config.get('folder')) + + # try the legacy behaviour of just matching the folder name, so 'lamp' alone matches 'production/customerA/lamp' + if vmfmor is None: + for mor, name in vsphere_client._get_managed_objects(MORTypes.Folder).iteritems(): + if name == vm_extra_config['folder']: + vmfmor = mor + + # if neither of strategies worked, bail out + if vmfmor is None: vsphere_client.disconnect() module.fail_json(msg="Cannot find folder named: %s" % vm_extra_config['folder']) - - for mor, name in vsphere_client._get_managed_objects(MORTypes.Folder).iteritems(): - if name == vm_extra_config['folder']: - vmfmor = mor else: vmfmor = dcprops.vmFolder._obj From 176b4103b60698a9327bb388d217200d7bbc4818 Mon Sep 17 00:00:00 2001 From: Charles Ferguson Date: Fri, 27 Nov 2015 20:49:27 +0000 Subject: [PATCH 131/200] Add documentation to 'file' AnsibleModule definition for internals. The parameters 'diff_peek' and 'validate' are not expected to be used by users. They are internal. To make it clear, this change adds the comments 'Internal use only' to each of those definitions to make it clear that they are actually used, just not by end-users. --- files/file.py | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/files/file.py b/files/file.py index 9e79d4acc61..cc94922fe35 100644 --- a/files/file.py +++ b/files/file.py @@ -87,12 +87,6 @@ options: - 'force the creation of the symlinks in two cases: the source file does not exist (but will appear later); the destination exists and is a file (so, we need to unlink the "path" file and create symlink to the "src" file in place of it).' 
- diff_peek: - required: false - description: - - "Only check whether the file looks like binary. Returns with the parameter - 'appears_binary' set to True or False depending on the initial content of the - file. This option is enabled when the option is set (to any value)." ''' EXAMPLES = ''' @@ -163,7 +157,8 @@ def main(): original_basename = dict(required=False), # Internal use only, for recursive ops recurse = dict(default=False, type='bool'), force = dict(required=False, default=False, type='bool'), - diff_peek = dict(default=None), + diff_peek = dict(default=None), # Internal use only, for internal checks in the action plugins + validate = dict(required=False, default=None), # Internal use only, for template and copy src = dict(required=False, default=None), ), add_file_common_args=True, From ae582adce64bc0cf168118fe2f4c8f67d222d878 Mon Sep 17 00:00:00 2001 From: Sina Sadeghi Date: Sun, 29 Nov 2015 18:00:44 +1100 Subject: [PATCH 132/200] Update hostname.py Added support for FreeBSD. (http://www.freebsd.org) --- system/hostname.py | 57 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 57 insertions(+) diff --git a/system/hostname.py b/system/hostname.py index 0d4ca085b83..2d14b0893b7 100644 --- a/system/hostname.py +++ b/system/hostname.py @@ -399,6 +399,57 @@ class SolarisStrategy(GenericStrategy): # =========================================== +class FreeBSDStrategy(GenericStrategy): + """ + This is a FreeBSD hostname manipulation strategy class - it edits + the /etc/rc.conf.d/hostname file. 
+ """ + + HOSTNAME_FILE = '/etc/rc.conf.d/hostname' + + def get_permanent_hostname(self): + + if not os.path.isfile(self.HOSTNAME_FILE): + try: + open(self.HOSTNAME_FILE, "a").write("hostname=temporarystub\n") + except IOError, err: + self.module.fail_json(msg="failed to write file: %s" % + str(err)) + try: + try: + f = open(self.HOSTNAME_FILE, 'r') + for line in f: + line = line.strip() + if line.startswith('hostname='): + return line[10:].strip('"') + except Exception, err: + self.module.fail_json(msg="failed to read hostname: %s" % str(err)) + finally: + f.close() + + return None + + def set_permanent_hostname(self, name): + try: + try: + f = open(self.HOSTNAME_FILE, 'r') + lines = [x.strip() for x in f] + + for i, line in enumerate(lines): + if line.startswith('hostname='): + lines[i] = 'hostname="%s"' % name + break + f.close() + + f = open(self.HOSTNAME_FILE, 'w') + f.write('\n'.join(lines) + '\n') + except Exception, err: + self.module.fail_json(msg="failed to update hostname: %s" % str(err)) + finally: + f.close() + +# =========================================== + class FedoraHostname(Hostname): platform = 'Linux' distribution = 'Fedora' @@ -541,6 +592,12 @@ class SolarisHostname(Hostname): distribution = None strategy_class = SolarisStrategy +class FreeBSDHostname(Hostname): + platform = 'FreeBSD' + distribution = None + strategy_class = FreeBSDStrategy + + # =========================================== def main(): From dc697bf533da90d341abb99b2b035f9d79dc58a4 Mon Sep 17 00:00:00 2001 From: Veaceslav Mindru Date: Sun, 29 Nov 2015 20:51:22 +0100 Subject: [PATCH 133/200] adding validate_certs for YUM. 
#2582 --- packaging/os/yum.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/packaging/os/yum.py b/packaging/os/yum.py index e0b598a410c..bed962e0158 100644 --- a/packaging/os/yum.py +++ b/packaging/os/yum.py @@ -117,6 +117,16 @@ options: choices: ["yes", "no"] aliases: [] + validate_certs: + description: + - This only applies if using a https url as the source of the rpm. e.g. for localinstall. If set to C(no), the SSL certificates will not be validated. + - This should only set to C(no) used on personally controlled sites using self-signed certificates as it avoids verifying the source site. + - Prior to 2.1 the code worked as if this was set to C(yes). + required: false + default: "yes" + choices: ["yes", "no"] + version_added: "2.1" + notes: - When used with a loop of package names in a playbook, ansible optimizes the call to the yum module. Instead of calling the module with a single @@ -965,6 +975,7 @@ def main(): conf_file=dict(default=None), disable_gpg_check=dict(required=False, default="no", type='bool'), update_cache=dict(required=False, default="no", type='bool'), + validate_certs=dict(required=False, defaults="yes", type='bool'), # this should not be needed, but exists as a failsafe install_repoquery=dict(required=False, default="yes", type='bool'), ), From c6fdd3809f8bc2a81142197db7fd1acd9f1f0305 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 30 Nov 2015 07:23:29 -0800 Subject: [PATCH 134/200] fixed typo EEXISTS is actually EEXIST fixes #2585 --- files/file.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/file.py b/files/file.py index cc94922fe35..428565579b8 100644 --- a/files/file.py +++ b/files/file.py @@ -288,7 +288,7 @@ def main(): except OSError, ex: # Possibly something else created the dir since the os.path.exists # check above. As long as it's a dir, we don't need to error out. 
- if not (ex.errno == errno.EEXISTS and os.isdir(curpath)): + if not (ex.errno == errno.EEXIST and os.isdir(curpath)): raise tmp_file_args = file_args.copy() tmp_file_args['path']=curpath From bfa7cdb5c4ecde7e0f75a8ca2bc89dcf70207317 Mon Sep 17 00:00:00 2001 From: Dylan Martin Date: Mon, 30 Nov 2015 11:47:38 -0800 Subject: [PATCH 135/200] improved error message when no handler found --- files/unarchive.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/unarchive.py b/files/unarchive.py index ac35ea58d88..d5df63a8def 100644 --- a/files/unarchive.py +++ b/files/unarchive.py @@ -250,7 +250,7 @@ def pick_handler(src, dest, module): obj = handler(src, dest, module) if obj.can_handle_archive(): return obj - module.fail_json(msg='Failed to find handler to unarchive. Make sure the required command to extract the file is installed.') + module.fail_json(msg='Failed to find handler for "%s". Make sure the required command to extract the file is installed.' % src) def main(): From cd9a7667aa39bbc1ccd606ebebaf3c62f228d601 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 30 Nov 2015 19:02:28 -0800 Subject: [PATCH 136/200] Don't raise or catch StandardError in amazon modules --- cloud/amazon/ec2_asg.py | 58 +++++++++++------------- cloud/amazon/ec2_elb.py | 9 ++-- cloud/amazon/ec2_elb_lb.py | 5 +- cloud/amazon/ec2_lc.py | 2 +- cloud/amazon/ec2_metric_alarm.py | 9 ++-- cloud/amazon/ec2_scaling_policy.py | 5 +- cloud/amazon/ec2_vol.py | 73 ++++++++++++++---------------- cloud/amazon/ec2_vpc_net.py | 49 ++++++++++---------- cloud/amazon/rds_param_group.py | 8 ++-- 9 files changed, 104 insertions(+), 114 deletions(-) diff --git a/cloud/amazon/ec2_asg.py b/cloud/amazon/ec2_asg.py index 39444c73c03..6564c4c26bb 100644 --- a/cloud/amazon/ec2_asg.py +++ b/cloud/amazon/ec2_asg.py @@ -152,9 +152,9 @@ EXAMPLES = ''' # Rolling ASG Updates -Below is an example of how to assign a new launch config to an ASG and terminate old instances. 
+Below is an example of how to assign a new launch config to an ASG and terminate old instances. -All instances in "myasg" that do not have the launch configuration named "my_new_lc" will be terminated in +All instances in "myasg" that do not have the launch configuration named "my_new_lc" will be terminated in a rolling fashion with instances using the current launch configuration, "my_new_lc". This could also be considered a rolling deploy of a pre-baked AMI. @@ -281,7 +281,6 @@ def get_properties(autoscaling_group): if getattr(autoscaling_group, "tags", None): properties['tags'] = dict((t.key, t.value) for t in autoscaling_group.tags) - return properties def elb_dreg(asg_connection, module, group_name, instance_id): @@ -298,7 +297,6 @@ def elb_dreg(asg_connection, module, group_name, instance_id): else: return - exists = True for lb in as_group.load_balancers: elb_connection.deregister_instances(lb, instance_id) log.debug("De-registering {0} from ELB {1}".format(instance_id, lb)) @@ -315,10 +313,8 @@ def elb_dreg(asg_connection, module, group_name, instance_id): time.sleep(10) if wait_timeout <= time.time(): - # waiting took too long + # waiting took too long module.fail_json(msg = "Waited too long for instance to deregister. 
{0}".format(time.asctime())) - - def elb_healthy(asg_connection, elb_connection, module, group_name): @@ -337,7 +333,7 @@ def elb_healthy(asg_connection, elb_connection, module, group_name): # but has not yet show up in the ELB try: lb_instances = elb_connection.describe_instance_health(lb, instances=instances) - except boto.exception.InvalidInstance, e: + except boto.exception.InvalidInstance: pass for i in lb_instances: if i.state == "InService": @@ -346,7 +342,6 @@ def elb_healthy(asg_connection, elb_connection, module, group_name): return len(healthy_instances) - def wait_for_elb(asg_connection, module, group_name): region, ec2_url, aws_connect_params = get_aws_connection_info(module) wait_timeout = module.params.get('wait_timeout') @@ -370,7 +365,7 @@ def wait_for_elb(asg_connection, module, group_name): log.debug("ELB thinks {0} instances are healthy.".format(healthy_instances)) time.sleep(10) if wait_timeout <= time.time(): - # waiting took too long + # waiting took too long module.fail_json(msg = "Waited too long for ELB instances to be healthy. %s" % time.asctime()) log.debug("Waiting complete. 
ELB thinks {0} instances are healthy.".format(healthy_instances)) @@ -396,7 +391,7 @@ def create_autoscaling_group(connection, module): region, ec2_url, aws_connect_params = get_aws_connection_info(module) try: ec2_connection = connect_to_aws(boto.ec2, region, **aws_connect_params) - except (boto.exception.NoAuthHandlerFound, StandardError), e: + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e: module.fail_json(msg=str(e)) elif vpc_zone_identifier: vpc_zone_identifier = ','.join(vpc_zone_identifier) @@ -433,7 +428,7 @@ def create_autoscaling_group(connection, module): try: connection.create_auto_scaling_group(ag) - if wait_for_instances == True: + if wait_for_instances: wait_for_new_inst(module, connection, group_name, wait_timeout, desired_capacity, 'viable_instances') wait_for_elb(connection, module, group_name) as_group = connection.get_all_groups(names=[group_name])[0] @@ -475,7 +470,7 @@ def create_autoscaling_group(connection, module): dead_tags = [] for tag in as_group.tags: have_tags[tag.key] = [tag.value, tag.propagate_at_launch] - if not tag.key in want_tags: + if tag.key not in want_tags: changed = True dead_tags.append(tag) @@ -492,14 +487,13 @@ def create_autoscaling_group(connection, module): changed = True as_group.load_balancers = module.params.get('load_balancers') - if changed: try: as_group.update() except BotoServerError, e: module.fail_json(msg=str(e)) - if wait_for_instances == True: + if wait_for_instances: wait_for_new_inst(module, connection, group_name, wait_timeout, desired_capacity, 'viable_instances') wait_for_elb(connection, module, group_name) try: @@ -525,7 +519,7 @@ def delete_autoscaling_group(connection, module): if tmp_groups: tmp_group = tmp_groups[0] if not tmp_group.instances: - instances = False + instances = False time.sleep(10) group.delete() @@ -580,15 +574,15 @@ def replace(connection, module): changed = True return(changed, props) - # we don't want to spin up extra instances if not necessary + # we don't 
want to spin up extra instances if not necessary if num_new_inst_needed < batch_size: - log.debug("Overriding batch size to {0}".format(num_new_inst_needed)) - batch_size = num_new_inst_needed + log.debug("Overriding batch size to {0}".format(num_new_inst_needed)) + batch_size = num_new_inst_needed if not old_instances: changed = False return(changed, props) - + #check if min_size/max_size/desired capacity have been specified and if not use ASG values if min_size is None: min_size = as_group.min_size @@ -637,7 +631,7 @@ def get_instances_by_lc(props, lc_check, initial_instances): new_instances.append(i) else: old_instances.append(i) - + else: log.debug("Comparing initial instances with current: {0}".format(initial_instances)) for i in props['instances']: @@ -659,10 +653,10 @@ def list_purgeable_instances(props, lc_check, replace_instances, initial_instanc # and they have a non-current launch config if lc_check: for i in instances: - if props['instance_facts'][i]['launch_config_name'] != props['launch_config_name']: + if props['instance_facts'][i]['launch_config_name'] != props['launch_config_name']: instances_to_terminate.append(i) else: - for i in instances: + for i in instances: if i in initial_instances: instances_to_terminate.append(i) return instances_to_terminate @@ -676,7 +670,7 @@ def terminate_batch(connection, module, replace_instances, initial_instances, le lc_check = module.params.get('lc_check') decrement_capacity = False break_loop = False - + as_group = connection.get_all_groups(names=[group_name])[0] props = get_properties(as_group) desired_size = as_group.min_size @@ -720,7 +714,7 @@ def terminate_batch(connection, module, replace_instances, initial_instances, le elb_dreg(connection, module, group_name, instance_id) log.debug("terminating instance: {0}".format(instance_id)) connection.terminate_instance(instance_id, decrement_capacity=decrement_capacity) - + # we wait to make sure the machines we marked as Unhealthy are # no longer in the list @@ 
-756,7 +750,7 @@ def wait_for_term_inst(connection, module, term_instances): # waiting took too long module.fail_json(msg = "Waited too long for old instances to terminate. %s" % time.asctime()) - + def wait_for_new_inst(module, connection, group_name, wait_timeout, desired_size, prop): # make sure we have the latest stats after that last loop. @@ -802,9 +796,9 @@ def main(): termination_policies=dict(type='list', default='Default') ), ) - + module = AnsibleModule( - argument_spec=argument_spec, + argument_spec=argument_spec, mutually_exclusive = [['replace_all_instances', 'replace_instances']] ) @@ -826,13 +820,13 @@ def main(): if state == 'present': create_changed, asg_properties=create_autoscaling_group(connection, module) elif state == 'absent': - changed = delete_autoscaling_group(connection, module) - module.exit_json( changed = changed ) + changed = delete_autoscaling_group(connection, module) + module.exit_json( changed = changed ) if replace_all_instances or replace_instances: replace_changed, asg_properties=replace(connection, module) if create_changed or replace_changed: changed = True module.exit_json( changed = changed, **asg_properties ) - -main() +if __name__ == '__main__': + main() diff --git a/cloud/amazon/ec2_elb.py b/cloud/amazon/ec2_elb.py index 4e19a054bd1..5b5569ce00d 100644 --- a/cloud/amazon/ec2_elb.py +++ b/cloud/amazon/ec2_elb.py @@ -257,7 +257,7 @@ class ElbManager: try: elb = connect_to_aws(boto.ec2.elb, self.region, **self.aws_connect_params) - except (boto.exception.NoAuthHandlerFound, StandardError), e: + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e: self.module.fail_json(msg=str(e)) elbs = [] @@ -290,7 +290,7 @@ class ElbManager: try: asg = connect_to_aws(boto.ec2.autoscale, self.region, **self.aws_connect_params) - except (boto.exception.NoAuthHandlerFound, StandardError), e: + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e: self.module.fail_json(msg=str(e)) asg_instances = 
asg.get_all_autoscaling_instances([self.instance_id]) @@ -314,7 +314,7 @@ class ElbManager: """Returns a boto.ec2.InstanceObject for self.instance_id""" try: ec2 = connect_to_aws(boto.ec2, self.region, **self.aws_connect_params) - except (boto.exception.NoAuthHandlerFound, StandardError), e: + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e: self.module.fail_json(msg=str(e)) return ec2.get_only_instances(instance_ids=[self.instance_id])[0] @@ -374,4 +374,5 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.ec2 import * -main() +if __name__ == '__main__': + main() diff --git a/cloud/amazon/ec2_elb_lb.py b/cloud/amazon/ec2_elb_lb.py index 1d9b2db283e..96ef6b22a99 100644 --- a/cloud/amazon/ec2_elb_lb.py +++ b/cloud/amazon/ec2_elb_lb.py @@ -492,7 +492,7 @@ class ElbManager(object): try: return connect_to_aws(boto.ec2.elb, self.region, **self.aws_connect_params) - except (boto.exception.NoAuthHandlerFound, StandardError), e: + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e: self.module.fail_json(msg=str(e)) def _delete_elb(self): @@ -981,4 +981,5 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.ec2 import * -main() +if __name__ == '__main__': + main() diff --git a/cloud/amazon/ec2_lc.py b/cloud/amazon/ec2_lc.py index 41b7effa502..802b9d05a0b 100644 --- a/cloud/amazon/ec2_lc.py +++ b/cloud/amazon/ec2_lc.py @@ -311,7 +311,7 @@ def main(): try: connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params) - except (boto.exception.NoAuthHandlerFound, StandardError), e: + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e: module.fail_json(msg=str(e)) state = module.params.get('state') diff --git a/cloud/amazon/ec2_metric_alarm.py b/cloud/amazon/ec2_metric_alarm.py index 94f303212ae..8ae7195f2e1 100644 --- a/cloud/amazon/ec2_metric_alarm.py +++ b/cloud/amazon/ec2_metric_alarm.py @@ -115,8 +115,6 @@ EXAMPLES = ''' ''' -import sys - try: import 
boto.ec2.cloudwatch from boto.ec2.cloudwatch import CloudWatchConnection, MetricAlarm @@ -270,11 +268,11 @@ def main(): state = module.params.get('state') region, ec2_url, aws_connect_params = get_aws_connection_info(module) - + if region: try: connection = connect_to_aws(boto.ec2.cloudwatch, region, **aws_connect_params) - except (boto.exception.NoAuthHandlerFound, StandardError), e: + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e: module.fail_json(msg=str(e)) else: module.fail_json(msg="region must be specified") @@ -288,4 +286,5 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.ec2 import * -main() +if __name__ == '__main__': + main() diff --git a/cloud/amazon/ec2_scaling_policy.py b/cloud/amazon/ec2_scaling_policy.py index 220fa325582..3c412232e21 100644 --- a/cloud/amazon/ec2_scaling_policy.py +++ b/cloud/amazon/ec2_scaling_policy.py @@ -178,7 +178,7 @@ def main(): try: connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params) - except (boto.exception.NoAuthHandlerFound, StandardError), e: + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e: module.fail_json(msg = str(e)) if state == 'present': @@ -187,4 +187,5 @@ def main(): delete_scaling_policy(connection, module) -main() +if __name__ == '__main__': + main() diff --git a/cloud/amazon/ec2_vol.py b/cloud/amazon/ec2_vol.py index aba121d8dd9..62e36a74ced 100644 --- a/cloud/amazon/ec2_vol.py +++ b/cloud/amazon/ec2_vol.py @@ -47,7 +47,7 @@ options: volume_type: description: - Type of EBS volume; standard (magnetic), gp2 (SSD), io1 (Provisioned IOPS). "Standard" is the old EBS default - and continues to remain the Ansible default for backwards compatibility. + and continues to remain the Ansible default for backwards compatibility. 
required: false default: standard version_added: "1.9" @@ -69,7 +69,7 @@ options: default: null zone: description: - - zone in which to create the volume, if unset uses the zone the instance is in (if set) + - zone in which to create the volume, if unset uses the zone the instance is in (if set) required: false default: null aliases: ['aws_zone', 'ec2_zone'] @@ -87,7 +87,7 @@ options: choices: ["yes", "no"] version_added: "1.5" state: - description: + description: - whether to ensure the volume is present or absent, or to list existing volumes (The C(list) option was added in version 1.8). required: false default: present @@ -101,15 +101,15 @@ extends_documentation_fragment: EXAMPLES = ''' # Simple attachment action -- ec2_vol: - instance: XXXXXX - volume_size: 5 +- ec2_vol: + instance: XXXXXX + volume_size: 5 device_name: sdd -# Example using custom iops params +# Example using custom iops params - ec2_vol: - instance: XXXXXX - volume_size: 5 + instance: XXXXXX + volume_size: 5 iops: 100 device_name: sdd @@ -118,15 +118,15 @@ EXAMPLES = ''' instance: XXXXXX snapshot: "{{ snapshot }}" -# Playbook example combined with instance launch +# Playbook example combined with instance launch - ec2: keypair: "{{ keypair }}" image: "{{ image }}" - wait: yes + wait: yes count: 3 register: ec2 - ec2_vol: - instance: "{{ item.id }} " + instance: "{{ item.id }} " volume_size: 5 with_items: ec2.instances register: ec2_vol @@ -223,7 +223,7 @@ def get_volume(module, ec2): return vols[0] def get_volumes(module, ec2): - + instance = module.params.get('instance') try: @@ -254,12 +254,10 @@ def boto_supports_volume_encryption(): """ return hasattr(boto, 'Version') and LooseVersion(boto.Version) >= LooseVersion('2.29.0') - + def create_volume(module, ec2, zone): changed = False name = module.params.get('name') - id = module.params.get('id') - instance = module.params.get('instance') iops = module.params.get('iops') encrypted = module.params.get('encrypted') volume_size = 
module.params.get('volume_size') @@ -292,16 +290,16 @@ def create_volume(module, ec2, zone): def attach_volume(module, ec2, volume, instance): - + device_name = module.params.get('device_name') changed = False - + # If device_name isn't set, make a choice based on best practices here: # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html - + # In future this needs to be more dynamic but combining block device mapping best practices # (bounds for devices, as above) with instance.block_device_mapping data would be tricky. For me ;) - + # Use password data attribute to tell whether the instance is Windows or Linux if device_name is None: try: @@ -311,7 +309,7 @@ def attach_volume(module, ec2, volume, instance): device_name = '/dev/xvdf' except boto.exception.BotoServerError, e: module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) - + if volume.attachment_state() is not None: adata = volume.attach_data if adata.instance_id != instance.id: @@ -330,9 +328,9 @@ def attach_volume(module, ec2, volume, instance): return volume, changed def detach_volume(module, ec2, volume): - + changed = False - + if volume.attachment_state() is not None: adata = volume.attach_data volume.detach() @@ -340,15 +338,15 @@ def detach_volume(module, ec2, volume): time.sleep(3) volume.update() changed = True - + return volume, changed - + def get_volume_info(volume, state): - + # If we're just listing volumes then do nothing, else get the latest update for the volume if state != 'list': volume.update() - + volume_info = {} attachment = volume.attach_data @@ -369,7 +367,7 @@ def get_volume_info(volume, state): }, 'tags': volume.tags } - + return volume_info def main(): @@ -397,34 +395,32 @@ def main(): name = module.params.get('name') instance = module.params.get('instance') volume_size = module.params.get('volume_size') - volume_type = module.params.get('volume_type') - iops = module.params.get('iops') encrypted = module.params.get('encrypted') 
device_name = module.params.get('device_name') zone = module.params.get('zone') snapshot = module.params.get('snapshot') state = module.params.get('state') - + # Ensure we have the zone or can get the zone if instance is None and zone is None and state == 'present': module.fail_json(msg="You must specify either instance or zone") - + # Set volume detach flag if instance == 'None' or instance == '': instance = None detach_vol_flag = True else: detach_vol_flag = False - + # Set changed flag changed = False region, ec2_url, aws_connect_params = get_aws_connection_info(module) - + if region: try: ec2 = connect_to_aws(boto.ec2, region, **aws_connect_params) - except (boto.exception.NoAuthHandlerFound, StandardError), e: + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e: module.fail_json(msg=str(e)) else: module.fail_json(msg="region must be specified") @@ -471,11 +467,11 @@ def main(): if volume_size and (id or snapshot): module.fail_json(msg="Cannot specify volume_size together with id or snapshot") - + if state == 'present': volume, changed = create_volume(module, ec2, zone) if detach_vol_flag: - volume, changed = detach_volume(module, ec2, volume) + volume, changed = detach_volume(module, ec2, volume) elif inst is not None: volume, changed = attach_volume(module, ec2, volume, inst) @@ -489,4 +485,5 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.ec2 import * -main() +if __name__ == '__main__': + main() diff --git a/cloud/amazon/ec2_vpc_net.py b/cloud/amazon/ec2_vpc_net.py index 51acbcaae37..23ce175b92b 100644 --- a/cloud/amazon/ec2_vpc_net.py +++ b/cloud/amazon/ec2_vpc_net.py @@ -93,9 +93,6 @@ EXAMPLES = ''' ''' -import time -import sys - try: import boto import boto.ec2 @@ -136,15 +133,15 @@ def vpc_exists(module, vpc, name, cidr_block, multi): module.fail_json(msg='Currently there are %d VPCs that have the same name and ' 'CIDR block you specified. 
If you would like to create ' 'the VPC anyway please pass True to the multi_ok param.' % len(matching_vpcs)) - + return matched_vpc def update_vpc_tags(vpc, module, vpc_obj, tags, name): - + if tags is None: tags = dict() - + tags.update({'Name': name}) try: current_tags = dict((t.name, t.value) for t in vpc.get_all_tags(filters={'resource-id': vpc_obj.id})) @@ -156,10 +153,10 @@ def update_vpc_tags(vpc, module, vpc_obj, tags, name): except Exception, e: e_msg=boto_exception(e) module.fail_json(msg=e_msg) - + def update_dhcp_opts(connection, module, vpc_obj, dhcp_id): - + if vpc_obj.dhcp_options_id != dhcp_id: connection.associate_dhcp_options(dhcp_id, vpc_obj.id) return True @@ -211,48 +208,47 @@ def main(): tags=module.params.get('tags') state=module.params.get('state') multi=module.params.get('multi_ok') - + changed=False region, ec2_url, aws_connect_params = get_aws_connection_info(module) - + if region: try: connection = connect_to_aws(boto.vpc, region, **aws_connect_params) - except (boto.exception.NoAuthHandlerFound, StandardError), e: + except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e: module.fail_json(msg=str(e)) else: module.fail_json(msg="region must be specified") - + if dns_hostnames and not dns_support: module.fail_json('In order to enable DNS Hostnames you must also enable DNS support') if state == 'present': - + # Check if VPC exists vpc_obj = vpc_exists(module, connection, name, cidr_block, multi) - + if vpc_obj is None: try: vpc_obj = connection.create_vpc(cidr_block, instance_tenancy=tenancy) changed = True except BotoServerError, e: module.fail_json(msg=e) - - if dhcp_id is not None: + + if dhcp_id is not None: try: if update_dhcp_opts(connection, module, vpc_obj, dhcp_id): changed = True except BotoServerError, e: module.fail_json(msg=e) - - if tags is not None or name is not None: + + if tags is not None or name is not None: try: if update_vpc_tags(connection, module, vpc_obj, tags, name): changed = True except BotoServerError, 
e: module.fail_json(msg=e) - # Note: Boto currently doesn't currently provide an interface to ec2-describe-vpc-attribute # which is needed in order to detect the current status of DNS options. For now we just update @@ -263,21 +259,21 @@ def main(): except BotoServerError, e: e_msg=boto_exception(e) module.fail_json(msg=e_msg) - + # get the vpc obj again in case it has changed try: vpc_obj = connection.get_all_vpcs(vpc_obj.id)[0] except BotoServerError, e: e_msg=boto_exception(e) module.fail_json(msg=e_msg) - + module.exit_json(changed=changed, vpc=get_vpc_values(vpc_obj)) elif state == 'absent': - + # Check if VPC exists vpc_obj = vpc_exists(module, connection, name, cidr_block, multi) - + if vpc_obj is not None: try: connection.delete_vpc(vpc_obj.id) @@ -287,11 +283,12 @@ def main(): e_msg = boto_exception(e) module.fail_json(msg="%s. You may want to use the ec2_vpc_subnet, ec2_vpc_igw, " "and/or ec2_vpc_route_table modules to ensure the other components are absent." % e_msg) - + module.exit_json(changed=changed, vpc=get_vpc_values(vpc_obj)) - + # import module snippets from ansible.module_utils.basic import * from ansible.module_utils.ec2 import * -main() +if __name__ == '__main__': + main() diff --git a/cloud/amazon/rds_param_group.py b/cloud/amazon/rds_param_group.py index b34e3090b53..fab333f0351 100644 --- a/cloud/amazon/rds_param_group.py +++ b/cloud/amazon/rds_param_group.py @@ -112,7 +112,7 @@ except ImportError: # returns a tuple: (whether or not a parameter was changed, the remaining parameters that weren't found in this parameter group) -class NotModifiableError(StandardError): +class NotModifiableError(Exception): def __init__(self, error_message, *args): super(NotModifiableError, self).__init__(error_message, *args) self.error_message = error_message @@ -175,7 +175,7 @@ def modify_group(group, params, immediate=False): new_params = dict(params) for key in new_params.keys(): - if group.has_key(key): + if key in group: param = group[key] new_value = 
new_params[key] @@ -281,7 +281,6 @@ def main(): else: break - except BotoServerError, e: module.fail_json(msg = e.error_message) @@ -297,4 +296,5 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.ec2 import * -main() +if __name__ == '__main__': + main() From 61a3625ed0cf924762b7bce0fec0f92c312acc7b Mon Sep 17 00:00:00 2001 From: krdlab Date: Wed, 2 Dec 2015 18:20:20 +0900 Subject: [PATCH 137/200] Fix 'stat' module document --- files/stat.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/stat.py b/files/stat.py index 852ddd5afd2..1e41185ad6a 100644 --- a/files/stat.py +++ b/files/stat.py @@ -111,7 +111,7 @@ stat: path: description: The full path of the file/object to get the facts of returned: success and if path exists - type: boolean + type: string sample: '/path/to/file' mode: description: Unix permissions of the file in octal From 34f7d7b06828d3cf22781dcc7907ecb5a856e2bf Mon Sep 17 00:00:00 2001 From: cspollar Date: Wed, 2 Dec 2015 14:01:16 -0600 Subject: [PATCH 138/200] Fixed typo in uri module example --- network/basics/uri.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/basics/uri.py b/network/basics/uri.py index 5c0907523b8..73b2f059f7e 100644 --- a/network/basics/uri.py +++ b/network/basics/uri.py @@ -159,7 +159,7 @@ EXAMPLES = ''' register: webpage - action: fail - when: "'illustrative' not in webpage.content" + when: "'AWESOME' not in webpage.content" # Create a JIRA issue From 292a83cba7693143f39b92c64cb5493423c81bee Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 2 Dec 2015 14:30:28 -0800 Subject: [PATCH 139/200] corrected version_added for new temp_dest feature --- network/basics/get_url.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/basics/get_url.py b/network/basics/get_url.py index 1e3eb93a71d..b5a40c729a0 100644 --- a/network/basics/get_url.py +++ b/network/basics/get_url.py @@ -62,7 +62,7 @@ options: - 
https://docs.python.org/2/library/tempfile.html#tempfile.tempdir required: false default: '' - version_added: '2.0' + version_added: '2.1' force: description: - If C(yes) and C(dest) is not a directory, will download the file every From 9c74272c9b8ec573bad16312688697e2329d547d Mon Sep 17 00:00:00 2001 From: Ales Nosek Date: Wed, 2 Dec 2015 20:31:27 -0800 Subject: [PATCH 140/200] Fix #2475 ini_file module: bracklets in key break idempotence Escape the regex special characters in the option name. --- files/ini_file.py | 24 +++++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/files/ini_file.py b/files/ini_file.py index 82d4621dfbb..2dd021ad27c 100644 --- a/files/ini_file.py +++ b/files/ini_file.py @@ -99,6 +99,22 @@ import ConfigParser import sys import os +# ============================================================== +# match_opt + +def match_opt(option, line): + option = re.escape(option) + return re.match('%s *=' % option, line) \ + or re.match('# *%s *=' % option, line) \ + or re.match('; *%s *=' % option, line) + +# ============================================================== +# match_active_opt + +def match_active_opt(option, line): + option = re.escape(option) + return re.match('%s *=' % option, line) + # ============================================================== # do_ini @@ -141,9 +157,7 @@ def do_ini(module, filename, section=None, option=None, value=None, state='prese if within_section and option: if state == 'present': # change the existing option line - if re.match('%s *=' % option, line) \ - or re.match('# *%s *=' % option, line) \ - or re.match('; *%s *=' % option, line): + if match_opt(option, line): newline = '%s = %s\n' % (option, value) changed = ini_lines[index] != newline ini_lines[index] = newline @@ -154,14 +168,14 @@ def do_ini(module, filename, section=None, option=None, value=None, state='prese line = ini_lines[index] if line.startswith('['): break - if re.match('%s *=' % option, line): + if 
match_active_opt(option, line): del ini_lines[index] else: index = index + 1 break else: # comment out the existing option line - if re.match('%s *=' % option, line): + if match_active_opt(option, line): ini_lines[index] = '#%s' % ini_lines[index] changed = True break From bfcdb0559734fd740f2aa422cce3bab28887f0bb Mon Sep 17 00:00:00 2001 From: Arthur Clement Date: Thu, 3 Dec 2015 22:48:13 +0100 Subject: [PATCH 141/200] Example of single instance with ssd gp2 root volume creation --- cloud/amazon/ec2.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index e035e07af24..9da62f616ea 100644 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -309,6 +309,22 @@ EXAMPLES = ''' vpc_subnet_id: subnet-29e63245 assign_public_ip: yes +# Single instance with ssd gp2 root volume +- ec2: + key_name: mykey + group: webserver + instance_type: c3.medium + image: ami-123456 + wait: yes + wait_timeout: 500 + volumes: + - device_name: /dev/xvda + volume_type: gp2 + volume_size: 8 + vpc_subnet_id: subnet-29e63245 + assign_public_ip: yes + exact_count: 1 + # Multiple groups example - ec2: key_name: mykey From 191347676eea08817da3fb237f24cdbf2d16e307 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 4 Dec 2015 09:18:45 -0800 Subject: [PATCH 142/200] When the password file does not exist and we're making sure the user isn't in the password file, change error into a warning Warning catches typos in the filename. Since the playbook is saying "make sure this user doesn't have an entry" it makes more sense to warn than to error. 
Fixes #2619 --- web_infrastructure/htpasswd.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/web_infrastructure/htpasswd.py b/web_infrastructure/htpasswd.py index 4253f1572ac..83a6445374b 100644 --- a/web_infrastructure/htpasswd.py +++ b/web_infrastructure/htpasswd.py @@ -97,6 +97,7 @@ else: apache_hashes = ["apr_md5_crypt", "des_crypt", "ldap_sha1", "plaintext"] + def create_missing_directories(dest): destpath = os.path.dirname(dest) if not os.path.exists(destpath): @@ -155,9 +156,6 @@ def absent(dest, username, check_mode): """ Ensures user is absent Returns (msg, changed) """ - if not os.path.exists(dest): - raise ValueError("%s does not exists" % dest) - if StrictVersion(passlib.__version__) >= StrictVersion('1.6'): ht = HtpasswdFile(dest, new=False) else: @@ -244,6 +242,9 @@ def main(): if state == 'present': (msg, changed) = present(path, username, password, crypt_scheme, create, check_mode) elif state == 'absent': + if not os.path.exists(path): + module.exit_json(msg="%s not present" % username, + warnings="%s does not exist" % path, changed=False) (msg, changed) = absent(path, username, check_mode) else: module.fail_json(msg="Invalid state: %s" % state) From 0d5380258e5c73a5f43764b55fda2a7dc26545d2 Mon Sep 17 00:00:00 2001 From: Veaceslav Mindru Date: Sun, 6 Dec 2015 20:54:05 +0100 Subject: [PATCH 143/200] fix typo s/defaults/default --- packaging/os/yum.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/os/yum.py b/packaging/os/yum.py index bed962e0158..4c1b1931c41 100644 --- a/packaging/os/yum.py +++ b/packaging/os/yum.py @@ -975,7 +975,7 @@ def main(): conf_file=dict(default=None), disable_gpg_check=dict(required=False, default="no", type='bool'), update_cache=dict(required=False, default="no", type='bool'), - validate_certs=dict(required=False, defaults="yes", type='bool'), + validate_certs=dict(required=False, default="yes", type='bool'), # this should not be needed, but exists as a failsafe 
install_repoquery=dict(required=False, default="yes", type='bool'), ), From 5599bfb07da40e72bcc1a81503ca877d82243016 Mon Sep 17 00:00:00 2001 From: Mark Theunissen Date: Tue, 8 Dec 2015 19:09:50 +0200 Subject: [PATCH 144/200] Remove Mark Theunissen as maintainer --- database/mysql/mysql_db.py | 8 ++++---- database/mysql/mysql_user.py | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/database/mysql/mysql_db.py b/database/mysql/mysql_db.py index df70e0f7e51..5942fe2c3b4 100644 --- a/database/mysql/mysql_db.py +++ b/database/mysql/mysql_db.py @@ -30,7 +30,7 @@ options: name: description: - name of the database to add or remove - - name=all May only be provided if I(state) is C(dump) or C(import). + - name=all May only be provided if I(state) is C(dump) or C(import). - if name=all Works like --all-databases option for mysqldump (Added in 2.0) required: true default: null @@ -90,7 +90,7 @@ notes: the credentials from C(~/.my.cnf), and finally fall back to using the MySQL default login of C(root) with no password. 
requirements: [ ConfigParser ] -author: "Mark Theunissen (@marktheunissen)" +author: "Ansible Core Team" ''' EXAMPLES = ''' @@ -367,7 +367,7 @@ def main(): except Exception, e: module.fail_json(msg="error deleting database: " + str(e)) elif state == "dump": - rc, stdout, stderr = db_dump(module, login_host, login_user, + rc, stdout, stderr = db_dump(module, login_host, login_user, login_password, db, target, all_databases, port=login_port, socket=module.params['login_unix_socket']) @@ -376,7 +376,7 @@ def main(): else: module.exit_json(changed=True, db=db, msg=stdout) elif state == "import": - rc, stdout, stderr = db_import(module, login_host, login_user, + rc, stdout, stderr = db_import(module, login_host, login_user, login_password, db, target, all_databases, port=login_port, socket=module.params['login_unix_socket']) diff --git a/database/mysql/mysql_user.py b/database/mysql/mysql_user.py index 3ac7c0890cd..3bc84d28ffd 100644 --- a/database/mysql/mysql_user.py +++ b/database/mysql/mysql_user.py @@ -120,7 +120,7 @@ notes: the file." 
requirements: [ "MySQLdb" ] -author: "Mark Theunissen (@marktheunissen)" +author: "Ansible Core Team" ''' EXAMPLES = """ @@ -139,7 +139,7 @@ EXAMPLES = """ # Specify grants composed of more than one word - mysql_user: name=replication password=12345 priv=*.*:"REPLICATION CLIENT" state=present -# Revoke all privileges for user 'bob' and password '12345' +# Revoke all privileges for user 'bob' and password '12345' - mysql_user: name=bob password=12345 priv=*.*:USAGE state=present # Example privileges string format From c93de2f930e4d9d4f6f9f4825fa1a7007a9d80dc Mon Sep 17 00:00:00 2001 From: quoing Date: Tue, 8 Dec 2015 16:31:25 +0100 Subject: [PATCH 145/200] Add "default" entry option back (removed in e95bcae), update will translate entry to standard parameters so compatibility with BDS is kept --- files/acl.py | 28 ++++++++++++++++++---------- 1 file changed, 18 insertions(+), 10 deletions(-) diff --git a/files/acl.py b/files/acl.py index ad0f4607609..91687f05eb5 100644 --- a/files/acl.py +++ b/files/acl.py @@ -127,23 +127,29 @@ def split_entry(entry): ''' splits entry and ensures normalized return''' a = entry.split(':') + + d = None + if entry.lower().startswith("d"): + d = True + a.pop(0) + if len(a) == 2: a.append(None) t, e, p = a - if t.startswith("u"): + if t.lower().startswith("u"): t = "user" - elif t.startswith("g"): + elif t.lower().startswith("g"): t = "group" - elif t.startswith("m"): + elif t.lower().startswith("m"): t = "mask" - elif t.startswith("o"): + elif t.lower().startswith("o"): t = "other" else: t = None - return [t, e, p] + return [d, t, e, p] def build_entry(etype, entity, permissions=None): @@ -269,16 +275,18 @@ def main(): if etype or entity or permissions: module.fail_json(msg="'entry' MUST NOT be set when 'entity', 'etype' or 'permissions' are set.") - if state == 'present' and entry.count(":") != 2: - module.fail_json(msg="'entry' MUST have 3 sections divided by ':' when 'state=present'.") + if state == 'present' and not entry.count(":") 
in [2, 3]: + module.fail_json(msg="'entry' MUST have 3 or 4 sections divided by ':' when 'state=present'.") - if state == 'absent' and entry.count(":") != 1: - module.fail_json(msg="'entry' MUST have 2 sections divided by ':' when 'state=absent'.") + if state == 'absent' and not entry.count(":") in [1, 2]: + module.fail_json(msg="'entry' MUST have 2 or 3 sections divided by ':' when 'state=absent'.") if state == 'query': module.fail_json(msg="'entry' MUST NOT be set when 'state=query'.") - etype, entity, permissions = split_entry(entry) + default_flag, etype, entity, permissions = split_entry(entry) + if default_flag != None: + default = default_flag changed = False msg = "" From db66144386d8a27b458ddf4d86b81588f4ddd021 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 8 Dec 2015 11:38:03 -0800 Subject: [PATCH 146/200] simplified lowercasing --- files/acl.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/files/acl.py b/files/acl.py index 91687f05eb5..8d0807e7430 100644 --- a/files/acl.py +++ b/files/acl.py @@ -137,14 +137,15 @@ def split_entry(entry): a.append(None) t, e, p = a + t = t.lower() - if t.lower().startswith("u"): + if t.startswith("u"): t = "user" - elif t.lower().startswith("g"): + elif t.startswith("g"): t = "group" - elif t.lower().startswith("m"): + elif t.startswith("m"): t = "mask" - elif t.lower().startswith("o"): + elif t.startswith("o"): t = "other" else: t = None From 877daf970d37cb7716ac6bb9351a579548fe54aa Mon Sep 17 00:00:00 2001 From: quoing Date: Tue, 8 Dec 2015 13:04:21 +0100 Subject: [PATCH 147/200] Fix: Default ACL parameters are not correctly handled --- files/acl.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/files/acl.py b/files/acl.py index 8d0807e7430..a3997cd495d 100644 --- a/files/acl.py +++ b/files/acl.py @@ -183,9 +183,9 @@ def build_command(module, mode, path, follow, default, recursive, entry=''): if default: if(mode == 'rm'): - cmd.append('-k') + cmd.insert(1, '-k') 
else: # mode == 'set' or mode == 'get' - cmd.append('-d') + cmd.insert(1, '-d') cmd.append(path) return cmd From 6128845b696b41d90862f6255b9a0e08557101c3 Mon Sep 17 00:00:00 2001 From: Dominique Barton Date: Tue, 24 Nov 2015 11:40:03 +0100 Subject: [PATCH 148/200] bugfix for issue #2537 --- system/user.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/system/user.py b/system/user.py index c04b748f068..e43173dac4f 100755 --- a/system/user.py +++ b/system/user.py @@ -1684,7 +1684,8 @@ class DarwinUser(User): out = '' err = '' - self._make_group_numerical() + if self.group: + self._make_group_numerical() for field in self.fields: if self.__dict__.has_key(field[0]) and self.__dict__[field[0]]: From f2b72e62c0f8c83857bd8fd7396395295a08cb9b Mon Sep 17 00:00:00 2001 From: nitzmahone Date: Tue, 8 Dec 2015 16:16:23 -0500 Subject: [PATCH 149/200] fixed disappearing groups on OSX user module Ensure that we don't try to modify the groups collection if groups are not specified --- system/user.py | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/system/user.py b/system/user.py index e43173dac4f..397abfbc253 100755 --- a/system/user.py +++ b/system/user.py @@ -1674,9 +1674,10 @@ class DarwinUser(User): self._update_system_user() # here we don't care about change status since it is a creation, # thus changed is always true. 
- (rc, _out, _err, changed) = self._modify_group() - out += _out - err += _err + if self.groups: + (rc, _out, _err, changed) = self._modify_group() + out += _out + err += _err return (rc, err, out) def modify_user(self): @@ -1708,12 +1709,13 @@ class DarwinUser(User): err += _err changed = rc - (rc, _out, _err, _changed) = self._modify_group() - out += _out - err += _err + if self.groups: + (rc, _out, _err, _changed) = self._modify_group() + out += _out + err += _err - if _changed is True: - changed = rc + if _changed is True: + changed = rc rc = self._update_system_user() if rc == 0: From 18f4f5dcc6121d85c57f8448aeee6f26710d4a2d Mon Sep 17 00:00:00 2001 From: Michel Alexandre Salim Date: Wed, 9 Dec 2015 10:07:16 +0700 Subject: [PATCH 150/200] Set the argument type for ec2_vol's encrypted parameter If this is not set, Ansible parses the parameter as a string. This is fine if the parameter is not provided by the caller, but if it is set to False or True explicitly, ec2_vol receives this as the string 'False' or the string 'True', both of which are truthy. Thus, without this fix, setting the parameter results in encryption always enabled. 
--- cloud/amazon/ec2_vol.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_vol.py b/cloud/amazon/ec2_vol.py index 62e36a74ced..4f1dbf33114 100644 --- a/cloud/amazon/ec2_vol.py +++ b/cloud/amazon/ec2_vol.py @@ -379,7 +379,7 @@ def main(): volume_size = dict(), volume_type = dict(choices=['standard', 'gp2', 'io1'], default='standard'), iops = dict(), - encrypted = dict(), + encrypted = dict(type='bool', default=False), device_name = dict(), zone = dict(aliases=['availability_zone', 'aws_zone', 'ec2_zone']), snapshot = dict(), From a54d1fe09c3bc15a3bc03c53eb2082f8bad34a7e Mon Sep 17 00:00:00 2001 From: Daniel Kimsey Date: Wed, 9 Dec 2015 11:59:16 -0600 Subject: [PATCH 151/200] Fix yum module failing to initalize yum plugins --- packaging/os/yum.py | 1 + 1 file changed, 1 insertion(+) diff --git a/packaging/os/yum.py b/packaging/os/yum.py index bed962e0158..783794690f0 100644 --- a/packaging/os/yum.py +++ b/packaging/os/yum.py @@ -195,6 +195,7 @@ def yum_base(conf_file=None): my = yum.YumBase() my.preconf.debuglevel=0 my.preconf.errorlevel=0 + my.preconf.plugins = True if conf_file and os.path.exists(conf_file): my.preconf.fn = conf_file if os.geteuid() != 0: From 8ccfdb874e8db922996d67237d11bdd38f354f9c Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Thu, 10 Dec 2015 00:00:19 +0100 Subject: [PATCH 152/200] Remove a unneeded use of use_unsafe_shell Since use_unsafe_shell is suspicious from a security point of view (or it wouldn't be unsafe), the less we have, the less code we have to toroughly inspect for a security audit. 
--- system/hostname.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/system/hostname.py b/system/hostname.py index 2d14b0893b7..1b577367c3e 100644 --- a/system/hostname.py +++ b/system/hostname.py @@ -260,8 +260,8 @@ class SystemdStrategy(GenericStrategy): (rc, out, err)) def get_permanent_hostname(self): - cmd = 'hostnamectl --static status' - rc, out, err = self.module.run_command(cmd, use_unsafe_shell=True) + cmd = ['hostnamectl', '--static', 'status'] + rc, out, err = self.module.run_command(cmd) if rc != 0: self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err)) From 27f561dca2b12db4a61db76d0b97f22b4c2a4acd Mon Sep 17 00:00:00 2001 From: Adam Fields Date: Thu, 10 Dec 2015 12:45:59 -0500 Subject: [PATCH 153/200] added a reference to the template module for clarity --- files/copy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/copy.py b/files/copy.py index 5dd1e9935e6..122d9808472 100644 --- a/files/copy.py +++ b/files/copy.py @@ -27,7 +27,7 @@ module: copy version_added: "historical" short_description: Copies files to remote locations. description: - - The M(copy) module copies a file on the local box to remote locations. Use the M(fetch) module to copy files from remote locations to the local box. + - The M(copy) module copies a file on the local box to remote locations. Use the M(fetch) module to copy files from remote locations to the local box. If you need variable interpolation in copied files, use the M(template) module. options: src: description: From b9fe8166fdcd24691ceac8503ee37ec826c18c00 Mon Sep 17 00:00:00 2001 From: "Timothy R. Chavez" Date: Wed, 9 Dec 2015 19:11:10 -0600 Subject: [PATCH 154/200] Get new server object after adding floating IP We need a new server object once we add the floating ip, otherwise we will be operating with the older server object pre-floating-ip assignment. 
--- cloud/openstack/os_floating_ip.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/openstack/os_floating_ip.py b/cloud/openstack/os_floating_ip.py index 957e3057375..b6342f8fa01 100644 --- a/cloud/openstack/os_floating_ip.py +++ b/cloud/openstack/os_floating_ip.py @@ -154,7 +154,7 @@ def main(): msg="server {0} not found".format(server_name_or_id)) if state == 'present': - cloud.add_ips_to_server( + server = cloud.add_ips_to_server( server=server, ips=floating_ip_address, reuse=reuse, fixed_address=fixed_address, wait=wait, timeout=timeout) fip_address = cloud.get_server_public_ip(server) From c007cd7f9abf8b80a62d627623125912e7256557 Mon Sep 17 00:00:00 2001 From: trevoro Date: Thu, 2 Oct 2014 10:05:35 -0700 Subject: [PATCH 155/200] adding password_hash support to mysql_user module fixing user_add arguments error fixing user_mod arguments error --- database/mysql/mysql_user.py | 72 +++++++++++++++++++++++------------- 1 file changed, 46 insertions(+), 26 deletions(-) diff --git a/database/mysql/mysql_user.py b/database/mysql/mysql_user.py index 3bc84d28ffd..a4f7635e5bc 100644 --- a/database/mysql/mysql_user.py +++ b/database/mysql/mysql_user.py @@ -35,6 +35,11 @@ options: - set the user's password. 
(Required when adding a user) required: false default: null + password_hash: + description: + - set the user's password hash (used in place of plain text password) + required: false + default: null host: description: - the 'host' part of the MySQL username @@ -158,6 +163,7 @@ password=n<_665{vS43y import getpass import tempfile import re +import string try: import MySQLdb except ImportError: @@ -211,26 +217,48 @@ def user_exists(cursor, user, host): count = cursor.fetchone() return count[0] > 0 -def user_add(cursor, user, host, password, new_priv): - cursor.execute("CREATE USER %s@%s IDENTIFIED BY %s", (user,host,password)) +def user_add(cursor, user, host, password, password_hash, new_priv): + if password and not password_hash: + cursor.execute("CREATE USER %s@%s IDENTIFIED BY %s", (user,host,password)) + elif password_hash: + cursor.execute("CREATE USER %s@%s IDENTIFIED BY PASSWORD %s", (user,host,password_hash)) if new_priv is not None: for db_table, priv in new_priv.iteritems(): privileges_grant(cursor, user,host,db_table,priv) return True -def user_mod(cursor, user, host, password, new_priv, append_privs): +def is_hash(password): + ishash = False + if len(password) is 41 and password[0] is '*': + ishash = True + for i in password[1:]: + if i not in string.hexdigits: + ishash = False + break + return ishash + +def user_mod(cursor, user, host, password, password_hash, new_priv, append_privs): changed = False grant_option = False - # Handle passwords - if password is not None: + # Handle passwords. 
+ if password is not None or password_hash is not None: cursor.execute("SELECT password FROM user WHERE user = %s AND host = %s", (user,host)) current_pass_hash = cursor.fetchone() - cursor.execute("SELECT PASSWORD(%s)", (password,)) - new_pass_hash = cursor.fetchone() - if current_pass_hash[0] != new_pass_hash[0]: - cursor.execute("SET PASSWORD FOR %s@%s = PASSWORD(%s)", (user,host,password)) - changed = True + + if password: + cursor.execute("SELECT PASSWORD(%s)", (password,)) + new_pass_hash = cursor.fetchone() + if current_pass_hash[0] != new_pass_hash[0]: + cursor.execute("SET PASSWORD FOR %s@%s = PASSWORD(%s)", (user,host,password)) + changed = True + elif password_hash: + if is_hash(password_hash): + if current_pass_hash[0] != password_hash: + cursor.execute("SET PASSWORD FOR %s@%s = %s", (user, host, password_hash)) + changed = True + else: + module.fail_json(msg="password_hash was specified however it does not appear to be a valid hash expecting: *SHA1(SHA1(your_password))") # Handle privileges if new_priv is not None: @@ -387,7 +415,8 @@ def main(): login_port=dict(default=3306, type='int'), login_unix_socket=dict(default=None), user=dict(required=True, aliases=['name']), - password=dict(default=None, no_log=True), + password=dict(default=None), + password_hash=dict(default=None), host=dict(default="localhost"), state=dict(default="present", choices=["absent", "present"]), priv=dict(default=None), @@ -401,7 +430,8 @@ def main(): login_password = module.params["login_password"] user = module.params["user"] password = module.params["password"] - host = module.params["host"].lower() + password_hash = module.params["password_hash"] + host = module.params["host"] state = module.params["state"] priv = module.params["priv"] check_implicit_admin = module.params['check_implicit_admin'] @@ -434,21 +464,11 @@ def main(): if state == "present": if user_exists(cursor, user, host): - try: - if update_password == 'always': - changed = user_mod(cursor, user, host, 
password, priv, append_privs) - else: - changed = user_mod(cursor, user, host, None, priv, append_privs) - - except (SQLParseError, InvalidPrivsError, MySQLdb.Error), e: - module.fail_json(msg=str(e)) + changed = user_mod(cursor, user, host, password, password_hash, priv, append_privs) else: - if password is None: - module.fail_json(msg="password parameter required when adding a user") - try: - changed = user_add(cursor, user, host, password, priv) - except (SQLParseError, InvalidPrivsError, MySQLdb.Error), e: - module.fail_json(msg=str(e)) + if password is None and password_hash is None: + module.fail_json(msg="password or password_hash parameter required when adding a user") + changed = user_add(cursor, user, host, password, password_hash, priv) elif state == "absent": if user_exists(cursor, user, host): changed = user_delete(cursor, user, host) From aba519868f9896d76a5a0a07c9266b94ba4cb6b4 Mon Sep 17 00:00:00 2001 From: Derek Smith Date: Tue, 23 Jun 2015 15:57:18 -0500 Subject: [PATCH 156/200] updated examples added mysql 5.7 user password modification support with backwards compatibility resolved mysql server version check and differences in user authentication management explicitly state support for mysql_native_password type and no others. fixed some failing logic and updated samples updated comment to actually match logic. 
simplified conditionals and a little refactor --- database/mysql/mysql_user.py | 102 +++++++++++++++++++++++------------ 1 file changed, 68 insertions(+), 34 deletions(-) diff --git a/database/mysql/mysql_user.py b/database/mysql/mysql_user.py index a4f7635e5bc..766eadb10f0 100644 --- a/database/mysql/mysql_user.py +++ b/database/mysql/mysql_user.py @@ -37,9 +37,11 @@ options: default: null password_hash: description: - - set the user's password hash (used in place of plain text password) + - Indicate that the 'password' field is a `mysql_native_password` hash required: false - default: null + choices: [ "yes", "no" ] + default: "no" + version_added: "2.0" host: description: - the 'host' part of the MySQL username @@ -123,6 +125,7 @@ notes: without providing any login_user/login_password details. The second must drop a ~/.my.cnf file containing the new root credentials. Subsequent runs of the playbook will then succeed by reading the new credentials from the file." + - Currently, there is only support for the `mysql_native_password` encryted password hash module. 
requirements: [ "MySQLdb" ] author: "Ansible Core Team" @@ -132,6 +135,9 @@ EXAMPLES = """ # Create database user with name 'bob' and password '12345' with all database privileges - mysql_user: name=bob password=12345 priv=*.*:ALL state=present +# Create database user with name 'bob' and previously hashed mysql native password '*EE0D72C1085C46C5278932678FBE2C6A782821B4' with all database privileges +- mysql_user: name=bob password='*EE0D72C1085C46C5278932678FBE2C6A782821B4' encrypted=yes priv=*.*:ALL state=present + # Creates database user 'bob' and password '12345' with all database privileges and 'WITH GRANT OPTION' - mysql_user: name=bob password=12345 priv=*.*:ALL,GRANT state=present @@ -212,53 +218,78 @@ def connect(module, login_user=None, login_password=None, config_file=''): db_connection = MySQLdb.connect(**config) return db_connection.cursor() +# User Authentication Management was change in MySQL 5.7 +# This is a generic check for if the server version is less than version 5.7 +def server_version_check(cursor): + cursor.execute("SELECT VERSION()"); + result = cursor.fetchone() + version_str = result[0] + version = version_str.split('.') + + if (int(version[0]) <= 5 and int(version[1]) < 7): + return True + else: + return False + def user_exists(cursor, user, host): cursor.execute("SELECT count(*) FROM user WHERE user = %s AND host = %s", (user,host)) count = cursor.fetchone() return count[0] > 0 -def user_add(cursor, user, host, password, password_hash, new_priv): - if password and not password_hash: +def user_add(cursor, user, host, password, encrypted, new_priv): + if password and encrypted: + cursor.execute("CREATE USER %s@%s IDENTIFIED BY PASSWORD %s", (user,host,password)) + elif password and not encrypted: cursor.execute("CREATE USER %s@%s IDENTIFIED BY %s", (user,host,password)) - elif password_hash: - cursor.execute("CREATE USER %s@%s IDENTIFIED BY PASSWORD %s", (user,host,password_hash)) if new_priv is not None: for db_table, priv in 
new_priv.iteritems(): privileges_grant(cursor, user,host,db_table,priv) return True - + def is_hash(password): ishash = False - if len(password) is 41 and password[0] is '*': - ishash = True - for i in password[1:]: - if i not in string.hexdigits: - ishash = False - break + if len(password) == 41 and password[0] == '*': + if frozenset(password[1:]).issubset(string.hexdigits): + ishash = True return ishash def user_mod(cursor, user, host, password, password_hash, new_priv, append_privs): changed = False grant_option = False - - # Handle passwords. - if password is not None or password_hash is not None: - cursor.execute("SELECT password FROM user WHERE user = %s AND host = %s", (user,host)) + + # Handle clear text and hashed passwords. + if bool(password): + # Determine what user management method server uses + old_user_mgmt = server_version_check(cursor) + + if old_user_mgmt: + cursor.execute("SELECT password FROM user WHERE user = %s AND host = %s", (user,host)) + else: + cursor.execute("SELECT authentication_string FROM user WHERE user = %s AND host = %s", (user,host)) current_pass_hash = cursor.fetchone() - if password: - cursor.execute("SELECT PASSWORD(%s)", (password,)) + if encrypted: + if is_hash(password): + if current_pass_hash[0] != encrypted: + if old_user_mgmt: + cursor.execute("SET PASSWORD FOR %s@%s = %s", (user, host, password)) + else: + cursor.execute("ALTER USER %s@%s IDENTIFIED WITH mysql_native_password AS %s", (user, host, password)) + changed = True + else: + module.fail_json(msg="encrypted was specified however it does not appear to be a valid hash expecting: *SHA1(SHA1(your_password))") + else: + if old_user_mgmt: + cursor.execute("SELECT PASSWORD(%s)", (password,)) + else: + cursor.execute("SELECT CONCAT('*', UCASE(SHA1(UNHEX(SHA1(%s)))))", (password,)) new_pass_hash = cursor.fetchone() if current_pass_hash[0] != new_pass_hash[0]: - cursor.execute("SET PASSWORD FOR %s@%s = PASSWORD(%s)", (user,host,password)) + if old_user_mgmt: + 
cursor.execute("SET PASSWORD FOR %s@%s = PASSWORD(%s)", (user, host, password)) + else: + cursor.execute("ALTER USER %s@%s IDENTIFIED BY %s", (user, host, password)) changed = True - elif password_hash: - if is_hash(password_hash): - if current_pass_hash[0] != password_hash: - cursor.execute("SET PASSWORD FOR %s@%s = %s", (user, host, password_hash)) - changed = True - else: - module.fail_json(msg="password_hash was specified however it does not appear to be a valid hash expecting: *SHA1(SHA1(your_password))") # Handle privileges if new_priv is not None: @@ -415,8 +446,8 @@ def main(): login_port=dict(default=3306, type='int'), login_unix_socket=dict(default=None), user=dict(required=True, aliases=['name']), - password=dict(default=None), - password_hash=dict(default=None), + password=dict(default=None, no_log=True), + encrypted=dict(default=False, type='bool'), host=dict(default="localhost"), state=dict(default="present", choices=["absent", "present"]), priv=dict(default=None), @@ -430,8 +461,8 @@ def main(): login_password = module.params["login_password"] user = module.params["user"] password = module.params["password"] - password_hash = module.params["password_hash"] - host = module.params["host"] + encrypted = module.boolean(module.params["encrypted"]) + host = module.params["host"].lower() state = module.params["state"] priv = module.params["priv"] check_implicit_admin = module.params['check_implicit_admin'] @@ -466,9 +497,12 @@ def main(): if user_exists(cursor, user, host): changed = user_mod(cursor, user, host, password, password_hash, priv, append_privs) else: - if password is None and password_hash is None: - module.fail_json(msg="password or password_hash parameter required when adding a user") - changed = user_add(cursor, user, host, password, password_hash, priv) + if password is None: + module.fail_json(msg="password parameter required when adding a user") + try: + changed = user_add(cursor, user, host, password, encrypted, priv) + except 
(SQLParseError, InvalidPrivsError, MySQLdb.Error), e: + module.fail_json(msg=str(e)) elif state == "absent": if user_exists(cursor, user, host): changed = user_delete(cursor, user, host) From 8e812164a40837f16fcf89ae72cce371c4fbf5eb Mon Sep 17 00:00:00 2001 From: Jonathan Mainguy Date: Fri, 11 Dec 2015 20:29:45 -0500 Subject: [PATCH 157/200] Add Jmainguy as author, fix hash check --- database/mysql/mysql_user.py | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/database/mysql/mysql_user.py b/database/mysql/mysql_user.py index 766eadb10f0..059d9fa6f57 100644 --- a/database/mysql/mysql_user.py +++ b/database/mysql/mysql_user.py @@ -35,7 +35,7 @@ options: - set the user's password. (Required when adding a user) required: false default: null - password_hash: + encrypted: description: - Indicate that the 'password' field is a `mysql_native_password` hash required: false @@ -128,7 +128,7 @@ notes: - Currently, there is only support for the `mysql_native_password` encryted password hash module. requirements: [ "MySQLdb" ] -author: "Ansible Core Team" +author: "Jonathan Mainguy (@Jmainguy)" ''' EXAMPLES = """ @@ -245,7 +245,7 @@ def user_add(cursor, user, host, password, encrypted, new_priv): for db_table, priv in new_priv.iteritems(): privileges_grant(cursor, user,host,db_table,priv) return True - + def is_hash(password): ishash = False if len(password) == 41 and password[0] == '*': @@ -253,10 +253,10 @@ def is_hash(password): ishash = True return ishash -def user_mod(cursor, user, host, password, password_hash, new_priv, append_privs): +def user_mod(cursor, user, host, password, encrypted, new_priv, append_privs): changed = False grant_option = False - + # Handle clear text and hashed passwords. 
if bool(password): # Determine what user management method server uses @@ -269,8 +269,9 @@ def user_mod(cursor, user, host, password, password_hash, new_priv, append_privs current_pass_hash = cursor.fetchone() if encrypted: + encrypted_string = (password) if is_hash(password): - if current_pass_hash[0] != encrypted: + if current_pass_hash[0] != encrypted_string: if old_user_mgmt: cursor.execute("SET PASSWORD FOR %s@%s = %s", (user, host, password)) else: @@ -291,6 +292,7 @@ def user_mod(cursor, user, host, password, password_hash, new_priv, append_privs cursor.execute("ALTER USER %s@%s IDENTIFIED BY %s", (user, host, password)) changed = True + # Handle privileges if new_priv is not None: curr_priv = privileges_get(cursor, user,host) @@ -495,7 +497,14 @@ def main(): if state == "present": if user_exists(cursor, user, host): - changed = user_mod(cursor, user, host, password, password_hash, priv, append_privs) + try: + if update_password == 'always': + changed = user_mod(cursor, user, host, password, encrypted, priv, append_privs) + else: + changed = user_mod(cursor, user, host, None, encrypted, priv, append_privs) + + except (SQLParseError, InvalidPrivsError, MySQLdb.Error), e: + module.fail_json(msg=str(e)) else: if password is None: module.fail_json(msg="password parameter required when adding a user") From ed43b66d7756f9f48cc6e427eb3208af916f82af Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 12 Dec 2015 13:04:01 -0500 Subject: [PATCH 158/200] made note that Z/z are only 2.1 options --- cloud/docker/docker.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index c6cf10f0783..19e83aef43f 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -81,8 +81,9 @@ options: description: - List of volumes to mount within the container - 'Use docker CLI-style syntax: C(/host:/container[:mode])' - - You can specify a read mode for the mount with either C(ro) or C(rw). 
SELinux hosts can additionally - use C(z) or C(Z) mount options to use a shared or private label for the volume. + - You can specify a read mode for the mount with either C(ro) or C(rw). + Starting at version 2.1, SELinux hosts can additionally use C(z) or C(Z) + mount options to use a shared or private label for the volume. default: null volumes_from: description: From 10e70aaf2bdc20149e4e3d4d1ef744e597c9daec Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 12 Dec 2015 17:16:47 -0500 Subject: [PATCH 159/200] note that create globs only work on 2.0 fixes #2666 --- commands/command.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/commands/command.py b/commands/command.py index 3fe16882c24..2bd8f7048ad 100644 --- a/commands/command.py +++ b/commands/command.py @@ -47,12 +47,12 @@ options: default: null creates: description: - - a filename or glob pattern, when it already exists, this step will B(not) be run. + - a filename or (since 2.0) glob pattern, when it already exists, this step will B(not) be run. required: no default: null removes: description: - - a filename or glob pattern, when it does not exist, this step will B(not) be run. + - a filename or (since 2.0) glob pattern, when it does not exist, this step will B(not) be run. version_added: "0.8" required: no default: null From f3ed8192412e8c9b9528a04bfef3cad1bad9f62f Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 12 Dec 2015 17:29:27 -0500 Subject: [PATCH 160/200] added missing version_added --- system/user.py | 1 + 1 file changed, 1 insertion(+) diff --git a/system/user.py b/system/user.py index 41934389a38..07ad015a561 100755 --- a/system/user.py +++ b/system/user.py @@ -53,6 +53,7 @@ options: required: false description: - Optionally sets the seuser type (user_u) on selinux enabled systems. 
+ version_added: "2.1" group: required: false description: From 0125770d8deaaf5770ae37890a0e051ee000d8f3 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Sun, 13 Dec 2015 09:16:28 -0800 Subject: [PATCH 161/200] Use rpm instead of repoquery for is_installed() * This keeps us from hitting bugs in repoquery/yum plugins in certain instances (#2559). * The previous is also a small performance boost * Also in is_installed(), when using the yum API, return if we detect a package name has been installed. We don't need to also check virtual provides in that case. This is another small performance boost. * Sort the list of packages returned by the list parameter. --- packaging/os/yum.py | 55 ++++++++++++++++++++++++++++----------------- 1 file changed, 34 insertions(+), 21 deletions(-) diff --git a/packaging/os/yum.py b/packaging/os/yum.py index 783794690f0..f9b5c41ef02 100644 --- a/packaging/os/yum.py +++ b/packaging/os/yum.py @@ -21,8 +21,6 @@ # along with Ansible. If not, see . # - -import traceback import os import yum import rpm @@ -189,6 +187,7 @@ EXAMPLES = ''' BUFSIZE = 65536 def_qf = "%{name}-%{version}-%{release}.%{arch}" +rpmbin = None def yum_base(conf_file=None): @@ -232,8 +231,8 @@ def is_installed(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=None, di en_repos = [] if dis_repos is None: dis_repos = [] - if not repoq: + if not repoq: pkgs = [] try: my = yum_base(conf_file) @@ -241,10 +240,10 @@ def is_installed(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=None, di my.repos.disableRepo(rid) for rid in en_repos: my.repos.enableRepo(rid) - + e, m, u = my.rpmdb.matchPackageNames([pkgspec]) pkgs = e + m - if not pkgs: + if not pkgs and not is_pkg: pkgs.extend(my.returnInstalledPackagesByDep(pkgspec)) except Exception, e: module.fail_json(msg="Failure talking to yum: %s" % e) @@ -252,21 +251,31 @@ def is_installed(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=None, di return [ po_to_nevra(p) for p in pkgs ] else: + global rpmbin + if 
not rpmbin: + rpmbin = module.get_bin_path('rpm', required=True) - cmd = repoq + ["--disablerepo=*", "--pkgnarrow=installed", "--qf", qf, pkgspec] + cmd = [rpmbin, '-q', '--qf', qf, pkgspec] rc, out, err = module.run_command(cmd) - if not is_pkg: - cmd = repoq + ["--disablerepo=*", "--pkgnarrow=installed", "--qf", qf, "--whatprovides", pkgspec] + if rc != 0 and 'is not installed' not in out: + module.fail_json(msg='Error from rpm: %s: %s' % (cmd, err)) + if 'is not installed' in out: + out = '' + + pkgs = [p for p in out.replace('(none)', '0').split('\n') if p.strip()] + if not pkgs and not is_pkg: + cmd = [rpmbin, '-q', '--qf', qf, '--whatprovides', pkgspec] rc2, out2, err2 = module.run_command(cmd) else: rc2, out2, err2 = (0, '', '') - - if rc == 0 and rc2 == 0: - out += out2 - return [p for p in out.split('\n') if p.strip()] - else: - module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err + err2)) - + + if rc2 != 0 and 'no package provides' not in out2: + module.fail_json(msg='Error from rpm: %s: %s' % (cmd, err + err2)) + if 'no package provides' in out2: + out2 = '' + pkgs += [p for p in out2.replace('(none)', '0').split('\n') if p.strip()] + return pkgs + return [] def is_available(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=None, dis_repos=None): @@ -506,20 +515,22 @@ def repolist(module, repoq, qf="%{repoid}"): def list_stuff(module, repoquerybin, conf_file, stuff): qf = "%{name}|%{epoch}|%{version}|%{release}|%{arch}|%{repoid}" + # is_installed goes through rpm instead of repoquery so it needs a slightly different format + is_installed_qf = "%{name}|%{epoch}|%{version}|%{release}|%{arch}|installed\n" repoq = [repoquerybin, '--show-duplicates', '--plugins', '--quiet'] if conf_file and os.path.exists(conf_file): repoq += ['-c', conf_file] if stuff == 'installed': - return [ pkg_to_dict(p) for p in is_installed(module, repoq, '-a', conf_file, qf=qf) if p.strip() ] + return [ pkg_to_dict(p) for p in sorted(is_installed(module, repoq, '-a', 
conf_file, qf=is_installed_qf)) if p.strip() ] elif stuff == 'updates': - return [ pkg_to_dict(p) for p in is_update(module, repoq, '-a', conf_file, qf=qf) if p.strip() ] + return [ pkg_to_dict(p) for p in sorted(is_update(module, repoq, '-a', conf_file, qf=qf)) if p.strip() ] elif stuff == 'available': - return [ pkg_to_dict(p) for p in is_available(module, repoq, '-a', conf_file, qf=qf) if p.strip() ] + return [ pkg_to_dict(p) for p in sorted(is_available(module, repoq, '-a', conf_file, qf=qf)) if p.strip() ] elif stuff == 'repos': - return [ dict(repoid=name, state='enabled') for name in repolist(module, repoq) if name.strip() ] + return [ dict(repoid=name, state='enabled') for name in sorted(repolist(module, repoq)) if name.strip() ] else: - return [ pkg_to_dict(p) for p in is_installed(module, repoq, stuff, conf_file, qf=qf) + is_available(module, repoq, stuff, conf_file, qf=qf) if p.strip() ] + return [ pkg_to_dict(p) for p in sorted(is_installed(module, repoq, stuff, conf_file, qf=is_installed_qf) + is_available(module, repoq, stuff, conf_file, qf=qf)) if p.strip() ] def install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): @@ -951,6 +962,7 @@ def ensure(module, state, pkgs, conf_file, enablerepo, disablerepo, return res + def main(): # state=installed name=pkgspec @@ -1022,7 +1034,8 @@ def main(): results = ensure(module, state, pkg, params['conf_file'], enablerepo, disablerepo, disable_gpg_check, exclude, repoquery) if repoquery: - results['msg'] = '%s %s' % (results.get('msg',''), 'Warning: Due to potential bad behaviour with rhnplugin and certificates, used slower repoquery calls instead of Yum API.') + results['msg'] = '%s %s' % (results.get('msg',''), + 'Warning: Due to potential bad behaviour with rhnplugin and certificates, used slower repoquery calls instead of Yum API.') module.exit_json(**results) From 9dd6cad22460d7595cc6caf8f28da8e09fbd9a91 Mon Sep 17 00:00:00 2001 From: Lee H Date: Mon, 14 Dec 2015 11:46:32 -0500 Subject: 
[PATCH 162/200] - add example showing removal of anonymous user accounts --- database/mysql/mysql_user.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/database/mysql/mysql_user.py b/database/mysql/mysql_user.py index 84b52a95d3f..aa7f19a4415 100644 --- a/database/mysql/mysql_user.py +++ b/database/mysql/mysql_user.py @@ -147,6 +147,12 @@ author: "Jonathan Mainguy (@Jmainguy)" ''' EXAMPLES = """ +# Removes anonymous user account for localhost (the name parameter is required, but ignored) +- mysql_user: name=anonymous user_anonymous=yes host=localhost state=absent + +# Removes all anonymous user accounts +- mysql_user: name=anonymous user_anonymous=yes host_all=yes state=absent + # Create database user with name 'bob' and password '12345' with all database privileges - mysql_user: name=bob password=12345 priv=*.*:ALL state=present From f500a2ec53a2897996b8175744249af8fa37a360 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 14 Dec 2015 21:18:13 -0500 Subject: [PATCH 163/200] added mime option to stat module it uses file magic to now return mime_type and charset of a file as per output of `file -i /path` --- files/stat.py | 35 ++++++++++++++++++++++++++++++++++- 1 file changed, 34 insertions(+), 1 deletion(-) diff --git a/files/stat.py b/files/stat.py index 1e41185ad6a..02c78f46d46 100644 --- a/files/stat.py +++ b/files/stat.py @@ -55,6 +55,15 @@ options: default: sha1 aliases: [ 'checksum_algo' ] version_added: "2.0" + mime: + description: + - Use file magic and return data about the nature of the file. this uses the 'file' utility found on most Linux/Unix systems. + - This will add both `mime_type` and 'charset' fields to the return, if possible. 
+ required: false + choices: [ Yes, No ] + default: No + version_added: "2.1" + aliases: [ 'mime_type', 'mime-type' ] author: "Bruce Pennypacker (@bpennypacker)" ''' @@ -278,6 +287,16 @@ stat: returned: success, path exists and user can read stats and installed python supports it type: string sample: www-data + mime_type: + description: file magic data or mime-type + returned: success, path exists and user can read stats and installed python supports it and the `mime` option was true, will return 'unknown' on error. + type: string + sample: PDF document, version 1.2 + charset: + description: file character set or encoding + returned: success, path exists and user can read stats and installed python supports it and the `mime` option was true, will return 'unknown' on error. + type: string + sample: us-ascii ''' import os @@ -293,7 +312,8 @@ def main(): follow = dict(default='no', type='bool'), get_md5 = dict(default='yes', type='bool'), get_checksum = dict(default='yes', type='bool'), - checksum_algorithm = dict(default='sha1', type='str', choices=['sha1', 'sha224', 'sha256', 'sha384', 'sha512'], aliases=['checksum_algo']) + checksum_algorithm = dict(default='sha1', type='str', choices=['sha1', 'sha224', 'sha256', 'sha384', 'sha512'], aliases=['checksum_algo']), + mime = dict(default=False, type='bool', aliases=['mime_type', 'mime-type']), ), supports_check_mode = True ) @@ -376,6 +396,19 @@ def main(): except: pass + if module.params.get('mime'): + d['mime_type'] = 'unknown' + d['charset'] = 'unknown' + + filecmd = [module.get_bin_path('file', True),'-i', path] + try: + rc, out, err = module.run_command(filecmd) + if rc == 0: + mtype, chset = out.split(':')[1].split(';') + d['mime_type'] = mtype.strip() + d['charset'] = chset.split('=')[1].strip() + except: + pass module.exit_json(changed=False, stat=d) From 0cacadb670b6359446dc7fba1130c4d800fd0664 Mon Sep 17 00:00:00 2001 From: Tobias Smolka Date: Tue, 15 Dec 2015 12:45:51 +0100 Subject: [PATCH 164/200] Making 
cluster parameter optional --- cloud/vmware/vsphere_guest.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index f5507b331c2..3b03ec3da78 100644 --- a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -656,7 +656,7 @@ def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, mo elif resource_pool: try: cluster = [k for k, - v in vsphere_client.get_clusters().items() if v == cluster_name][0] + v in vsphere_client.get_clusters().items() if v == cluster_name][0] if cluster_name else None except IndexError, e: vsphere_client.disconnect() module.fail_json(msg="Cannot find Cluster named: %s" % @@ -1059,7 +1059,7 @@ def create_vm(vsphere_client, module, esxi, resource_pool, cluster_name, guest, if resource_pool: try: cluster = [k for k, - v in vsphere_client.get_clusters().items() if v == cluster_name][0] + v in vsphere_client.get_clusters().items() if v == cluster_name][0] if cluster_name else None except IndexError, e: vsphere_client.disconnect() module.fail_json(msg="Cannot find Cluster named: %s" % From 187a4bd5054cf23cd14a94697836e4d1dcd95551 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Sat, 28 Nov 2015 12:38:37 -0500 Subject: [PATCH 165/200] Add support for network, boot_from_volume and volumes nics is a great flexible parameter, but it's wordy. Shade now supports a simple parameter too, which is just "network" and takes a name or id. Add passthrough support. In addition to supporting booting from a pre-existing volume, nova and shade both support the concept of booting from volume based on an image. Pass the parameters through. Shade supports boot-time attachment of additional volumes for OpenStack instances. Pass through the parameter so that ansible users can also take advantage of this. 
--- cloud/openstack/os_server.py | 97 +++++++++++++++++++++++++++++++++--- 1 file changed, 90 insertions(+), 7 deletions(-) diff --git a/cloud/openstack/os_server.py b/cloud/openstack/os_server.py index 189840e2498..0d0c5566f96 100644 --- a/cloud/openstack/os_server.py +++ b/cloud/openstack/os_server.py @@ -80,6 +80,13 @@ options: added. This may be a YAML list or a common separated string. required: false default: None + network: + description: + - Name or ID of a network to attach this instance to. A simpler + version of the nics parameter, only one of network or nics should + be supplied. + required: false + default: None nics: description: - A list of networks to which the instance's interface should @@ -87,6 +94,7 @@ options: or port-name. - 'Also this accepts a string containing a list of (net/port)-(id/name) Eg: nics: "net-id=uuid-1,port-name=myport"' + Only one of network or nics should be supplied. required: false default: None auto_ip: @@ -133,15 +141,32 @@ options: - Opaque blob of data which is made available to the instance required: false default: None - root_volume: + boot_from_volume: + description: + - Should the instance boot from a persistent volume created based on + the image given. Mutually exclusive with boot_volume. + required: false + default: false + volume_size: description: - - Boot instance from a volume + - The size of the volume to create in GB if booting from volume based + on an image. + boot_volume: + description: + - Volume name or id to use as the volume to boot from. Implies + boot_from_volume. Mutually exclusive with image and boot_from_volume. required: false default: None + aliases: ['root_volume'] terminate_volume: description: - If true, delete volume when deleting instance (if booted from volume) default: false + volumes: + description: + - A list of preexisting volumes names or ids to attach to the instance + required: false + default: [] state: description: - Should the resource be present or absent. 
@@ -280,6 +305,52 @@ EXAMPLES = ''' - net-id: 34605f38-e52a-25d2-b6ec-754a13ffb723 - net-name: another_network meta: "hostname=test1,group=uge_master" + +# Creates a new instance and attaches to a specific network +- os_server: + state: present + auth: + auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/ + username: admin + password: admin + project_name: admin + name: vm1 + image: 4f905f38-e52a-43d2-b6ec-754a13ffb529 + key_name: ansible_key + timeout: 200 + flavor: 4 + network: another_network + +# Creates a new instance with 4G of RAM on a 75G Ubuntu Trusty volume +- name: launch a compute instance + hosts: localhost + tasks: + - name: launch an instance + os_server: + name: vm1 + state: present + cloud: mordred + region_name: ams01 + image: Ubuntu Server 14.04 + flavor_ram: 4096 + boot_from_volume: True + volume_size: 75 + +# Creates a new instance with 2 volumes attached +- name: launch a compute instance + hosts: localhost + tasks: + - name: launch an instance + os_server: + name: vm1 + state: present + cloud: mordred + region_name: ams01 + image: Ubuntu Server 14.04 + flavor_ram: 4096 + volumes: + - photos + - music ''' @@ -339,7 +410,7 @@ def _create_server(module, cloud): flavor_include = module.params['flavor_include'] image_id = None - if not module.params['root_volume']: + if not module.params['boot_volume']: image_id = cloud.get_image_id( module.params['image'], module.params['image_exclude']) @@ -371,7 +442,9 @@ def _create_server(module, cloud): userdata=module.params['userdata'], config_drive=module.params['config_drive'], ) - for optional_param in ('region_name', 'key_name', 'availability_zone'): + for optional_param in ( + 'region_name', 'key_name', 'availability_zone', 'network', + 'volume_size', 'volumes'): if module.params[optional_param]: bootkwargs[optional_param] = module.params[optional_param] @@ -379,7 +452,8 @@ def _create_server(module, cloud): ip_pool=module.params['floating_ip_pools'], 
ips=module.params['floating_ips'], auto_ip=module.params['auto_ip'], - root_volume=module.params['root_volume'], + boot_volume=module.params['boot_volume'], + boot_from_volume=module.params['boot_from_volume'], terminate_volume=module.params['terminate_volume'], wait=module.params['wait'], timeout=module.params['timeout'], **bootkwargs @@ -461,6 +535,7 @@ def main(): flavor_include = dict(default=None), key_name = dict(default=None), security_groups = dict(default=['default'], type='list'), + network = dict(default=None), nics = dict(default=[], type='list'), meta = dict(default=None), userdata = dict(default=None), @@ -468,8 +543,11 @@ def main(): auto_ip = dict(default=True, type='bool', aliases=['auto_floating_ip', 'public_ip']), floating_ips = dict(default=None), floating_ip_pools = dict(default=None), - root_volume = dict(default=None), + volume_size = dict(default=False, type='int'), + boot_from_volume = dict(default=False, type='bool'), + boot_volume = dict(default=None, aliases=['root_volume']), terminate_volume = dict(default=False, type='bool'), + volumes = dict(default=[], type='list'), state = dict(default='present', choices=['absent', 'present']), ) module_kwargs = openstack_module_kwargs( @@ -478,7 +556,12 @@ def main(): ['auto_ip', 'floating_ip_pools'], ['floating_ips', 'floating_ip_pools'], ['flavor', 'flavor_ram'], - ['image', 'root_volume'], + ['image', 'boot_volume'], + ['boot_from_volume', 'boot_volume'], + ['nics', 'network'], + ], + required_if=[ + ('boot_from_volume', True, ['volume_size', 'image']), ], ) module = AnsibleModule(argument_spec, **module_kwargs) From 6b13da738bb7d629eef8cd624dce9b9b41eefca5 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 15 Dec 2015 08:43:17 -0500 Subject: [PATCH 166/200] updated module docs, added choices to state --- cloud/amazon/ec2_vpc.py | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/cloud/amazon/ec2_vpc.py b/cloud/amazon/ec2_vpc.py index a3003a6dcc6..f67909ad233 100644 
--- a/cloud/amazon/ec2_vpc.py +++ b/cloud/amazon/ec2_vpc.py @@ -49,19 +49,15 @@ options: - 'A dictionary array of subnets to add of the form: { cidr: ..., az: ... , resource_tags: ... }. Where az is the desired availability zone of the subnet, but it is not required. Tags (i.e.: resource_tags) is also optional and use dictionary form: { "Environment":"Dev", "Tier":"Web", ...}. All VPC subnets not in this list will be removed. As of 1.8, if the subnets parameter is not specified, no existing subnets will be modified.' required: false default: null - aliases: [] vpc_id: description: - A VPC id to terminate when state=absent required: false default: null - aliases: [] resource_tags: description: - 'A dictionary array of resource tags of the form: { tag1: value1, tag2: value2 }. Tags in this list are used in conjunction with CIDR block to uniquely identify a VPC in lieu of vpc_id. Therefore, if CIDR/Tag combination does not exist, a new VPC will be created. VPC tags not on this list will be ignored. Prior to 1.7, specifying a resource tag was optional.' required: true - default: null - aliases: [] version_added: "1.6" internet_gateway: description: @@ -69,31 +65,26 @@ options: required: false default: "no" choices: [ "yes", "no" ] - aliases: [] route_tables: description: - 'A dictionary array of route tables to add of the form: { subnets: [172.22.2.0/24, 172.22.3.0/24,], routes: [{ dest: 0.0.0.0/0, gw: igw},], resource_tags: ... }. Where the subnets list is those subnets the route table should be associated with, and the routes list is a list of routes to be in the table. The special keyword for the gw of igw specifies that you should the route should go through the internet gateway attached to the VPC. gw also accepts instance-ids in addition igw. resource_tags is optional and uses dictionary form: { "Name": "public", ... }. 
This module is currently unable to affect the "main" route table due to some limitations in boto, so you must explicitly define the associated subnets or they will be attached to the main table implicitly. As of 1.8, if the route_tables parameter is not specified, no existing routes will be modified.' required: false default: null - aliases: [] wait: description: - wait for the VPC to be in state 'available' before returning required: false default: "no" choices: [ "yes", "no" ] - aliases: [] wait_timeout: description: - how long before wait gives up, in seconds default: 300 - aliases: [] state: description: - Create or terminate the VPC required: true - default: present - aliases: [] + choices: [ "present", "absent" ] author: "Carson Gee (@carsongee)" extends_documentation_fragment: - aws From cf061dd93a6e42bdb0d47bbd4145a0dc79427b5a Mon Sep 17 00:00:00 2001 From: Donovan Jones Date: Wed, 16 Dec 2015 15:20:55 +1300 Subject: [PATCH 167/200] fix typo in os_server security_groups description --- cloud/openstack/os_server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/openstack/os_server.py b/cloud/openstack/os_server.py index 0d0c5566f96..546e2b1644f 100644 --- a/cloud/openstack/os_server.py +++ b/cloud/openstack/os_server.py @@ -77,7 +77,7 @@ options: security_groups: description: - Names of the security groups to which the instance should be - added. This may be a YAML list or a common separated string. + added. This may be a YAML list or a comma separated string. 
required: false default: None network: From 85a19c68bd8d5dd6c85342b66ef9b370c67bfbbf Mon Sep 17 00:00:00 2001 From: Lee H Date: Wed, 16 Dec 2015 02:03:30 -0500 Subject: [PATCH 168/200] - remove user_anonymous as the same thing can be accomplished by user='', but leave in the examples for removing anonymous users --- database/mysql/mysql_user.py | 18 +++--------------- 1 file changed, 3 insertions(+), 15 deletions(-) diff --git a/database/mysql/mysql_user.py b/database/mysql/mysql_user.py index aa7f19a4415..09edf8100e7 100644 --- a/database/mysql/mysql_user.py +++ b/database/mysql/mysql_user.py @@ -30,13 +30,6 @@ options: description: - name of the user (role) to add or remove required: true - user_anonymous: - description: - - username is to be ignored and anonymous users with no username - handled - required: false - choices: [ "yes", "no" ] - default: no password: description: - set the user's password. (Required when adding a user) @@ -147,11 +140,11 @@ author: "Jonathan Mainguy (@Jmainguy)" ''' EXAMPLES = """ -# Removes anonymous user account for localhost (the name parameter is required, but ignored) -- mysql_user: name=anonymous user_anonymous=yes host=localhost state=absent +# Removes anonymous user account for localhost +- mysql_user: name='' host=localhost state=absent # Removes all anonymous user accounts -- mysql_user: name=anonymous user_anonymous=yes host_all=yes state=absent +- mysql_user: name='' host_all=yes state=absent # Create database user with name 'bob' and password '12345' with all database privileges - mysql_user: name=bob password=12345 priv=*.*:ALL state=present @@ -526,7 +519,6 @@ def main(): login_port=dict(default=3306, type='int'), login_unix_socket=dict(default=None), user=dict(required=True, aliases=['name']), - user_anonymous=dict(type="bool", default="no"), password=dict(default=None, no_log=True), encrypted=dict(default=False, type='bool'), host=dict(default="localhost"), @@ -542,7 +534,6 @@ def main(): login_user = 
module.params["login_user"] login_password = module.params["login_password"] user = module.params["user"] - user_anonymous = module.params["user_anonymous"] password = module.params["password"] encrypted = module.boolean(module.params["encrypted"]) host = module.params["host"].lower() @@ -554,9 +545,6 @@ def main(): append_privs = module.boolean(module.params["append_privs"]) update_password = module.params['update_password'] - if user_anonymous: - user = '' - config_file = os.path.expanduser(os.path.expandvars(config_file)) if not mysqldb_found: module.fail_json(msg="the python mysqldb module is required") From f3b2180e422eab2de74fa789be65f116070e88e8 Mon Sep 17 00:00:00 2001 From: Lee H Date: Wed, 16 Dec 2015 02:06:02 -0500 Subject: [PATCH 169/200] - add version_added as requested to host_all --- database/mysql/mysql_user.py | 1 + 1 file changed, 1 insertion(+) diff --git a/database/mysql/mysql_user.py b/database/mysql/mysql_user.py index 09edf8100e7..528f7fadd60 100644 --- a/database/mysql/mysql_user.py +++ b/database/mysql/mysql_user.py @@ -55,6 +55,7 @@ options: required: false choices: [ "yes", "no" ] default: "no" + version_added: "2.1" login_user: description: - The username used to authenticate with From f04cd88d22f86524d6a94d9791ae499af126e67f Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 16 Dec 2015 08:06:29 -0800 Subject: [PATCH 170/200] Fix os_server docs build --- cloud/openstack/os_server.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloud/openstack/os_server.py b/cloud/openstack/os_server.py index 546e2b1644f..076af3d8dd4 100644 --- a/cloud/openstack/os_server.py +++ b/cloud/openstack/os_server.py @@ -93,8 +93,8 @@ options: be attached. Networks may be referenced by net-id/net-name/port-id or port-name. - 'Also this accepts a string containing a list of (net/port)-(id/name) - Eg: nics: "net-id=uuid-1,port-name=myport"' - Only one of network or nics should be supplied. 
+ Eg: nics: "net-id=uuid-1,port-name=myport" + Only one of network or nics should be supplied.' required: false default: None auto_ip: From 69d56c4d218594613b2f95a8714d6f13d040e083 Mon Sep 17 00:00:00 2001 From: Jonathan Mainguy Date: Tue, 10 Nov 2015 23:13:48 -0500 Subject: [PATCH 171/200] Unify all 3 mysql modules. Use same connection method, use config_file, and add ssl support --- database/mysql/mysql_db.py | 180 +++++++----------------------- database/mysql/mysql_user.py | 78 ++----------- database/mysql/mysql_variables.py | 125 ++++----------------- 3 files changed, 75 insertions(+), 308 deletions(-) diff --git a/database/mysql/mysql_db.py b/database/mysql/mysql_db.py index b85526e9524..b7317e91082 100644 --- a/database/mysql/mysql_db.py +++ b/database/mysql/mysql_db.py @@ -35,31 +35,6 @@ options: required: true default: null aliases: [ db ] - login_user: - description: - - The username used to authenticate with - required: false - default: null - login_password: - description: - - The password used to authenticate with - required: false - default: null - login_host: - description: - - Host running the database - required: false - default: localhost - login_port: - description: - - Port of the MySQL server. Requires login_host be defined as other then localhost if login_port is used - required: false - default: 3306 - login_unix_socket: - description: - - The path to a Unix domain socket for local connections - required: false - default: null state: description: - The database state @@ -81,19 +56,8 @@ options: - Location, on the remote host, of the dump file to read from or write to. Uncompressed SQL files (C(.sql)) as well as bzip2 (C(.bz2)), gzip (C(.gz)) and xz (Added in 2.0) compressed files are supported. required: false -notes: - - Requires the MySQLdb Python package on the remote host. For Ubuntu, this - is as easy as apt-get install python-mysqldb. (See M(apt).) For CentOS/Fedora, this - is as easy as yum install MySQL-python. (See M(yum).) 
- - Requires the mysql command line client. For Centos/Fedora, this is as easy as - yum install mariadb (See M(yum).). For Debian/Ubuntu this is as easy as - apt-get install mariadb-client. (See M(apt).) - - Both I(login_password) and I(login_user) are required when you are - passing credentials. If none are present, the module will attempt to read - the credentials from C(~/.my.cnf), and finally fall back to using the MySQL - default login of C(root) with no password. -requirements: [ ConfigParser ] author: "Ansible Core Team" +extends_documentation_fragment: mysql ''' EXAMPLES = ''' @@ -111,11 +75,11 @@ EXAMPLES = ''' - mysql_db: state=import name=all target=/tmp/{{ inventory_hostname }}.sql ''' -import ConfigParser import os import pipes import stat import subprocess + try: import MySQLdb except ImportError: @@ -136,9 +100,20 @@ def db_delete(cursor, db): cursor.execute(query) return True -def db_dump(module, host, user, password, db_name, target, all_databases, port, socket=None): +def db_dump(module, host, user, password, db_name, target, all_databases, port, config_file, socket=None, ssl_cert=None, ssl_key=None, ssl_ca=None): cmd = module.get_bin_path('mysqldump', True) - cmd += " --quick --user=%s --password=%s" % (pipes.quote(user), pipes.quote(password)) + # If defined, mysqldump demands --defaults-extra-file be the first option + cmd += " --defaults-extra-file=%s --quick" % pipes.quote(config_file) + if user is not None: + cmd += " --user=%s" % pipes.quote(user) + if password is not None: + cmd += " --password=%s" % pipes.quote(password) + if ssl_cert is not None: + cmd += " --ssl-cert=%s" % pipes.quote(ssl_cert) + if ssl_key is not None: + cmd += " --ssl-key=%s" % pipes.quote(ssl_key) + if ssl_cert is not None: + cmd += " --ssl-ca=%s" % pipes.quote(ssl_ca) if socket is not None: cmd += " --socket=%s" % pipes.quote(socket) else: @@ -164,17 +139,25 @@ def db_dump(module, host, user, password, db_name, target, all_databases, port, rc, stdout, stderr = 
module.run_command(cmd, use_unsafe_shell=True) return rc, stdout, stderr -def db_import(module, host, user, password, db_name, target, all_databases, port, socket=None): +def db_import(module, host, user, password, db_name, target, all_databases, port, config_file, socket=None, ssl_cert=None, ssl_key=None, ssl_ca=None): if not os.path.exists(target): return module.fail_json(msg="target %s does not exist on the host" % target) cmd = [module.get_bin_path('mysql', True)] + # --defaults-file must go first, or errors out + cmd.append("--defaults-extra-file=%s" % pipes.quote(config_file)) if user: cmd.append("--user=%s" % pipes.quote(user)) if password: cmd.append("--password=%s" % pipes.quote(password)) if socket is not None: cmd.append("--socket=%s" % pipes.quote(socket)) + if ssl_cert is not None: + cmd.append("--ssl-cert=%s" % pipes.quote(ssl_cert)) + if ssl_key is not None: + cmd.append("--ssl-key=%s" % pipes.quote(ssl_key)) + if ssl_cert is not None: + cmd.append("--ssl-ca=%s" % pipes.quote(ssl_ca)) else: cmd.append("--host=%s" % pipes.quote(host)) cmd.append("--port=%i" % port) @@ -218,61 +201,6 @@ def db_create(cursor, db, encoding, collation): res = cursor.execute(query, query_params) return True -def strip_quotes(s): - """ Remove surrounding single or double quotes - - >>> print strip_quotes('hello') - hello - >>> print strip_quotes('"hello"') - hello - >>> print strip_quotes("'hello'") - hello - >>> print strip_quotes("'hello") - 'hello - - """ - single_quote = "'" - double_quote = '"' - - if s.startswith(single_quote) and s.endswith(single_quote): - s = s.strip(single_quote) - elif s.startswith(double_quote) and s.endswith(double_quote): - s = s.strip(double_quote) - return s - - -def config_get(config, section, option): - """ Calls ConfigParser.get and strips quotes - - See: http://dev.mysql.com/doc/refman/5.0/en/option-files.html - """ - return strip_quotes(config.get(section, option)) - - -def load_mycnf(): - config = ConfigParser.RawConfigParser() - mycnf 
= os.path.expanduser('~/.my.cnf') - if not os.path.exists(mycnf): - return False - try: - config.readfp(open(mycnf)) - except (IOError): - return False - # We support two forms of passwords in .my.cnf, both pass= and password=, - # as these are both supported by MySQL. - try: - passwd = config_get(config, 'client', 'password') - except (ConfigParser.NoOptionError): - try: - passwd = config_get(config, 'client', 'pass') - except (ConfigParser.NoOptionError): - return False - try: - creds = dict(user=config_get(config, 'client', 'user'),passwd=passwd) - except (ConfigParser.NoOptionError): - return False - return creds - # =========================================== # Module execution. # @@ -290,6 +218,10 @@ def main(): collation=dict(default=""), target=dict(default=None), state=dict(default="present", choices=["absent", "present","dump", "import"]), + ssl_cert=dict(default=None), + ssl_key=dict(default=None), + ssl_ca=dict(default=None), + config_file=dict(default="~/.my.cnf"), ) ) @@ -305,62 +237,37 @@ def main(): login_port = module.params["login_port"] if login_port < 0 or login_port > 65535: module.fail_json(msg="login_port must be a valid unix port number (0-65535)") + ssl_cert = module.params["ssl_cert"] + ssl_key = module.params["ssl_key"] + ssl_ca = module.params["ssl_ca"] + config_file = module.params['config_file'] + config_file = os.path.expanduser(os.path.expandvars(config_file)) + login_password = module.params["login_password"] + login_user = module.params["login_user"] + login_host = module.params["login_host"] # make sure the target path is expanded for ~ and $HOME if target is not None: target = os.path.expandvars(os.path.expanduser(target)) - # Either the caller passes both a username and password with which to connect to - # mysql, or they pass neither and allow this module to read the credentials from - # ~/.my.cnf. 
- login_password = module.params["login_password"] - login_user = module.params["login_user"] - if login_user is None and login_password is None: - mycnf_creds = load_mycnf() - if mycnf_creds is False: - login_user = "root" - login_password = "" - else: - login_user = mycnf_creds["user"] - login_password = mycnf_creds["passwd"] - elif login_password is None or login_user is None: - module.fail_json(msg="when supplying login arguments, both login_user and login_password must be provided") - login_host = module.params["login_host"] - if state in ['dump','import']: if target is None: module.fail_json(msg="with state=%s target is required" % (state)) if db == 'all': - connect_to_db = 'mysql' db = 'mysql' all_databases = True else: - connect_to_db = db all_databases = False else: if db == 'all': module.fail_json(msg="name is not allowed to equal 'all' unless state equals import, or dump.") - connect_to_db = '' try: - if socket: - try: - socketmode = os.stat(socket).st_mode - if not stat.S_ISSOCK(socketmode): - module.fail_json(msg="%s, is not a socket, unable to connect" % socket) - except OSError: - module.fail_json(msg="%s, does not exist, unable to connect" % socket) - db_connection = MySQLdb.connect(host=module.params["login_host"], unix_socket=socket, user=login_user, passwd=login_password, db=connect_to_db) - elif login_port != 3306 and module.params["login_host"] == "localhost": - module.fail_json(msg="login_host is required when login_port is defined, login_host cannot be localhost when login_port is defined") - else: - db_connection = MySQLdb.connect(host=module.params["login_host"], port=login_port, user=login_user, passwd=login_password, db=connect_to_db) - cursor = db_connection.cursor() + cursor = mysql_connect(module, login_user, login_password, config_file, ssl_cert, ssl_key, ssl_ca) except Exception, e: - errno, errstr = e.args - if "Unknown database" in str(e): - module.fail_json(msg="ERROR: %s %s" % (errno, errstr)) + if os.path.exists(config_file): + 
module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or %s has the credentials. Exception message: %s" % (config_file, e)) else: - module.fail_json(msg="unable to connect, check login credentials (login_user, and login_password, which can be defined in ~/.my.cnf), check that mysql socket exists and mysql server is running (ERROR: %s %s)" % (errno, errstr)) + module.fail_json(msg="unable to find %s. Exception message: %s" % (config_file, e)) changed = False if db_exists(cursor, db): @@ -372,8 +279,7 @@ def main(): elif state == "dump": rc, stdout, stderr = db_dump(module, login_host, login_user, login_password, db, target, all_databases, - port=login_port, - socket=module.params['login_unix_socket']) + login_port, config_file, socket, ssl_cert, ssl_key, ssl_ca) if rc != 0: module.fail_json(msg="%s" % stderr) else: @@ -381,8 +287,7 @@ def main(): elif state == "import": rc, stdout, stderr = db_import(module, login_host, login_user, login_password, db, target, all_databases, - port=login_port, - socket=module.params['login_unix_socket']) + login_port, config_file, socket, ssl_cert, ssl_key, ssl_ca) if rc != 0: module.fail_json(msg="%s" % stderr) else: @@ -399,5 +304,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * from ansible.module_utils.database import * +from ansible.module_utils.mysql import * if __name__ == '__main__': main() diff --git a/database/mysql/mysql_user.py b/database/mysql/mysql_user.py index 528f7fadd60..fdf6b577d54 100644 --- a/database/mysql/mysql_user.py +++ b/database/mysql/mysql_user.py @@ -56,32 +56,6 @@ options: choices: [ "yes", "no" ] default: "no" version_added: "2.1" - login_user: - description: - - The username used to authenticate with - required: false - default: null - login_password: - description: - - The password used to authenticate with - required: false - default: null - login_host: - description: - - Host running the database - required: false - 
default: localhost - login_port: - description: - - Port of the MySQL server - required: false - default: 3306 - version_added: '1.4' - login_unix_socket: - description: - - The path to a Unix domain socket for local connections - required: false - default: null priv: description: - "MySQL privileges string in the format: C(db.table:priv1,priv2)" @@ -116,19 +90,7 @@ options: version_added: "2.0" description: - C(always) will update passwords if they differ. C(on_create) will only set the password for newly created users. - config_file: - description: - - Specify a config file from which user and password are to be read - required: false - default: '~/.my.cnf' - version_added: "2.0" notes: - - Requires the MySQLdb Python package on the remote host. For Ubuntu, this - is as easy as apt-get install python-mysqldb. - - Both C(login_password) and C(login_user) are required when you are - passing credentials. If none are present, the module will attempt to read - the credentials from C(~/.my.cnf), and finally fall back to using the MySQL - default login of 'root' with no password. - "MySQL server installs with default login_user of 'root' and no password. To secure this user as part of an idempotent playbook, you must create at least two tasks: the first must change the root user's password, without providing any login_user/login_password details. The second must drop a ~/.my.cnf file containing @@ -136,8 +98,8 @@ notes: the file." - Currently, there is only support for the `mysql_native_password` encryted password hash module. -requirements: [ "MySQLdb" ] author: "Jonathan Mainguy (@Jmainguy)" +extends_documentation_fragment: mysql ''' EXAMPLES = """ @@ -212,30 +174,6 @@ class InvalidPrivsError(Exception): # MySQL module specific support methods. 
# -def connect(module, login_user=None, login_password=None, config_file=''): - config = { - 'host': module.params['login_host'], - 'db': 'mysql' - } - - if module.params['login_unix_socket']: - config['unix_socket'] = module.params['login_unix_socket'] - else: - config['port'] = module.params['login_port'] - - if os.path.exists(config_file): - config['read_default_file'] = config_file - - # If login_user or login_password are given, they should override the - # config file - if login_user is not None: - config['user'] = login_user - if login_password is not None: - config['passwd'] = login_password - - db_connection = MySQLdb.connect(**config) - return db_connection.cursor() - # User Authentication Management was change in MySQL 5.7 # This is a generic check for if the server version is less than version 5.7 def server_version_check(cursor): @@ -530,6 +468,9 @@ def main(): check_implicit_admin=dict(default=False, type='bool'), update_password=dict(default="always", choices=["always", "on_create"]), config_file=dict(default="~/.my.cnf"), + ssl_cert=dict(default=None), + ssl_key=dict(default=None), + ssl_ca=dict(default=None), ) ) login_user = module.params["login_user"] @@ -545,6 +486,10 @@ def main(): config_file = module.params['config_file'] append_privs = module.boolean(module.params["append_privs"]) update_password = module.params['update_password'] + ssl_cert = module.params["ssl_cert"] + ssl_key = module.params["ssl_key"] + ssl_ca = module.params["ssl_ca"] + db = 'mysql' config_file = os.path.expanduser(os.path.expandvars(config_file)) if not mysqldb_found: @@ -560,14 +505,14 @@ def main(): try: if check_implicit_admin: try: - cursor = connect(module, 'root', '', config_file) + cursor = mysql_connect(module, 'root', '', config_file, ssl_cert, ssl_key, ssl_ca, db) except: pass if not cursor: - cursor = connect(module, login_user, login_password, config_file) + cursor = mysql_connect(module, login_user, login_password, config_file, ssl_cert, ssl_key, ssl_ca, 
db) except Exception, e: - module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or ~/.my.cnf has the credentials. Exception message: %s" % e) + module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or %s has the credentials. Exception message: %s" % (config_file, e)) if state == "present": if user_exists(cursor, user, host, host_all): @@ -598,5 +543,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * from ansible.module_utils.database import * +from ansible.module_utils.mysql import * if __name__ == '__main__': main() diff --git a/database/mysql/mysql_variables.py b/database/mysql/mysql_variables.py index ab4848d6938..5e551cd0eb3 100644 --- a/database/mysql/mysql_variables.py +++ b/database/mysql/mysql_variables.py @@ -40,26 +40,7 @@ options: description: - If set, then sets variable value to this required: False - login_user: - description: - - username to connect mysql host, if defined login_password also needed. - required: False - login_password: - description: - - password to connect mysql host, if defined login_user also needed. 
- required: False - login_host: - description: - - mysql host to connect - required: False - login_port: - version_added: "2.0" - description: - - mysql port to connect - required: False - login_unix_socket: - description: - - unix socket to connect mysql server +extends_documentation_fragment: mysql ''' EXAMPLES = ''' # Check for sync_binlog setting @@ -70,7 +51,6 @@ EXAMPLES = ''' ''' -import ConfigParser import os import warnings from re import match @@ -134,66 +114,6 @@ def setvariable(cursor, mysqlvar, value): result = str(e) return result - -def strip_quotes(s): - """ Remove surrounding single or double quotes - - >>> print strip_quotes('hello') - hello - >>> print strip_quotes('"hello"') - hello - >>> print strip_quotes("'hello'") - hello - >>> print strip_quotes("'hello") - 'hello - - """ - single_quote = "'" - double_quote = '"' - - if s.startswith(single_quote) and s.endswith(single_quote): - s = s.strip(single_quote) - elif s.startswith(double_quote) and s.endswith(double_quote): - s = s.strip(double_quote) - return s - - -def config_get(config, section, option): - """ Calls ConfigParser.get and strips quotes - - See: http://dev.mysql.com/doc/refman/5.0/en/option-files.html - """ - return strip_quotes(config.get(section, option)) - - -def load_mycnf(): - config = ConfigParser.RawConfigParser() - mycnf = os.path.expanduser('~/.my.cnf') - if not os.path.exists(mycnf): - return False - try: - config.readfp(open(mycnf)) - except (IOError): - return False - # We support two forms of passwords in .my.cnf, both pass= and password=, - # as these are both supported by MySQL. 
- try: - passwd = config_get(config, 'client', 'password') - except (ConfigParser.NoOptionError): - try: - passwd = config_get(config, 'client', 'pass') - except (ConfigParser.NoOptionError): - return False - - # If .my.cnf doesn't specify a user, default to user login name - try: - user = config_get(config, 'client', 'user') - except (ConfigParser.NoOptionError): - user = getpass.getuser() - creds = dict(user=user, passwd=passwd) - return creds - - def main(): module = AnsibleModule( argument_spec = dict( @@ -203,14 +123,24 @@ def main(): login_port=dict(default="3306", type='int'), login_unix_socket=dict(default=None), variable=dict(default=None), - value=dict(default=None) - + value=dict(default=None), + ssl_cert=dict(default=None), + ssl_key=dict(default=None), + ssl_ca=dict(default=None), + config_file=dict(default="~/.my.cnf") ) ) user = module.params["login_user"] password = module.params["login_password"] host = module.params["login_host"] port = module.params["login_port"] + ssl_cert = module.params["ssl_cert"] + ssl_key = module.params["ssl_key"] + ssl_ca = module.params["ssl_ca"] + config_file = module.params['config_file'] + config_file = os.path.expanduser(os.path.expandvars(config_file)) + db = 'mysql' + mysqlvar = module.params["variable"] value = module.params["value"] if mysqlvar is None: @@ -222,30 +152,14 @@ def main(): else: warnings.filterwarnings('error', category=MySQLdb.Warning) - # Either the caller passes both a username and password with which to connect to - # mysql, or they pass neither and allow this module to read the credentials from - # ~/.my.cnf. 
- login_password = module.params["login_password"] - login_user = module.params["login_user"] - if login_user is None and login_password is None: - mycnf_creds = load_mycnf() - if mycnf_creds is False: - login_user = "root" - login_password = "" - else: - login_user = mycnf_creds["user"] - login_password = mycnf_creds["passwd"] - elif login_password is None or login_user is None: - module.fail_json(msg="when supplying login arguments, both login_user and login_password must be provided") try: - if module.params["login_unix_socket"]: - db_connection = MySQLdb.connect(host=module.params["login_host"], port=module.params["login_port"], unix_socket=module.params["login_unix_socket"], user=login_user, passwd=login_password, db="mysql") - else: - db_connection = MySQLdb.connect(host=module.params["login_host"], port=module.params["login_port"], user=login_user, passwd=login_password, db="mysql") - cursor = db_connection.cursor() + cursor = mysql_connect(module, user, password, config_file, ssl_cert, ssl_key, ssl_ca, db) except Exception, e: - errno, errstr = e.args - module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or ~/.my.cnf has the credentials (ERROR: %s %s)" % (errno, errstr)) + if os.path.exists(config_file): + module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or %s has the credentials. Exception message: %s" % (config_file, e)) + else: + module.fail_json(msg="unable to find %s. 
Exception message: %s" % (config_file, e)) + mysqlvar_val = getvariable(cursor, mysqlvar) if mysqlvar_val is None: module.fail_json(msg="Variable not available \"%s\"" % mysqlvar, changed=False) @@ -269,4 +183,5 @@ def main(): # import module snippets from ansible.module_utils.basic import * from ansible.module_utils.database import * +from ansible.module_utils.mysql import * main() From 16a3bdaa7da9e9f7c0572d3a3fdbfd79f29c2b9d Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 16 Dec 2015 14:07:17 -0800 Subject: [PATCH 172/200] Account for mariadb versioning --- database/mysql/mysql_user.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/database/mysql/mysql_user.py b/database/mysql/mysql_user.py index fdf6b577d54..95d11a164df 100644 --- a/database/mysql/mysql_user.py +++ b/database/mysql/mysql_user.py @@ -182,10 +182,14 @@ def server_version_check(cursor): version_str = result[0] version = version_str.split('.') + # Currently we have no facility to handle new-style password update on + # mariadb and the old-style update continues to work + if version_str.lower().endswith('mariadb'): + return True if (int(version[0]) <= 5 and int(version[1]) < 7): - return True + return True else: - return False + return False def user_exists(cursor, user, host, host_all): if host_all: From 9cc67e45a68a7f7aa51be009a9391f01922439c9 Mon Sep 17 00:00:00 2001 From: Alberto Gireud Date: Thu, 17 Dec 2015 07:17:44 -0600 Subject: [PATCH 173/200] Update root_volume variable --- cloud/openstack/os_server.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cloud/openstack/os_server.py b/cloud/openstack/os_server.py index 076af3d8dd4..f54b150388d 100644 --- a/cloud/openstack/os_server.py +++ b/cloud/openstack/os_server.py @@ -571,14 +571,14 @@ def main(): state = module.params['state'] image = module.params['image'] - root_volume = module.params['root_volume'] + boot_volume = module.params['boot_volume'] flavor = module.params['flavor'] 
flavor_ram = module.params['flavor_ram'] if state == 'present': - if not (image or root_volume): + if not (image or boot_volume): module.fail_json( - msg="Parameter 'image' or 'root_volume' is required " + msg="Parameter 'image' or 'boot_volume' is required " "if state == 'present'" ) if not flavor and not flavor_ram: From 827b9596da2dce75a245e842db2ac2444744a2ee Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 17 Dec 2015 12:55:43 -0500 Subject: [PATCH 174/200] service goes back to failing when absent if no tools and no init script, this should always fail --- system/service.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/system/service.py b/system/service.py index 2b8dbb8696c..0364766448e 100644 --- a/system/service.py +++ b/system/service.py @@ -471,8 +471,7 @@ class LinuxService(Service): self.enable_cmd = location['chkconfig'] if self.enable_cmd is None: - # exiting without change on non-existent service - self.module.exit_json(changed=False, exists=False) + self.module.fail_json(msg="no service or tool found for: %s" % self.name) # If no service control tool selected yet, try to see if 'service' is available if self.svc_cmd is None and location.get('service', False): @@ -480,7 +479,7 @@ class LinuxService(Service): # couldn't find anything yet if self.svc_cmd is None and not self.svc_initscript: - self.module.exit_json(changed=False, exists=False) + self.module.fail_json(msg='cannot find \'service\' binary or init script for service, possible typo in service name?, aborting') if location.get('initctl', False): self.svc_initctl = location['initctl'] From b4a3fdd493378853c0b6ab35d5d8bcf52612a4a0 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 17 Dec 2015 11:35:44 -0800 Subject: [PATCH 175/200] Fix mysqldump usage of config_file --- database/mysql/mysql_db.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/database/mysql/mysql_db.py b/database/mysql/mysql_db.py index b7317e91082..24bcf40ed84 
100644 --- a/database/mysql/mysql_db.py +++ b/database/mysql/mysql_db.py @@ -103,7 +103,9 @@ def db_delete(cursor, db): def db_dump(module, host, user, password, db_name, target, all_databases, port, config_file, socket=None, ssl_cert=None, ssl_key=None, ssl_ca=None): cmd = module.get_bin_path('mysqldump', True) # If defined, mysqldump demands --defaults-extra-file be the first option - cmd += " --defaults-extra-file=%s --quick" % pipes.quote(config_file) + if config_file: + cmd += " --defaults-extra-file=%s" % pipes.quote(config_file) + cmd += " --quick" if user is not None: cmd += " --user=%s" % pipes.quote(user) if password is not None: @@ -145,7 +147,8 @@ def db_import(module, host, user, password, db_name, target, all_databases, port cmd = [module.get_bin_path('mysql', True)] # --defaults-file must go first, or errors out - cmd.append("--defaults-extra-file=%s" % pipes.quote(config_file)) + if config_file: + cmd.append("--defaults-extra-file=%s" % pipes.quote(config_file)) if user: cmd.append("--user=%s" % pipes.quote(user)) if password: @@ -270,6 +273,8 @@ def main(): module.fail_json(msg="unable to find %s. 
Exception message: %s" % (config_file, e)) changed = False + if not os.path.exists(config_file): + config_file = None if db_exists(cursor, db): if state == "absent": try: From 9366dfb63e565c9e0901d714be8832fc89b275d6 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Thu, 17 Dec 2015 13:45:04 -0800 Subject: [PATCH 176/200] mariadb isn't always the last element of the version string --- database/mysql/mysql_user.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/database/mysql/mysql_user.py b/database/mysql/mysql_user.py index 95d11a164df..51f6e9ea1d4 100644 --- a/database/mysql/mysql_user.py +++ b/database/mysql/mysql_user.py @@ -184,7 +184,7 @@ def server_version_check(cursor): # Currently we have no facility to handle new-style password update on # mariadb and the old-style update continues to work - if version_str.lower().endswith('mariadb'): + if 'mariadb' in version_str.lower(): return True if (int(version[0]) <= 5 and int(version[1]) < 7): return True From 1bd04f797e2da8da8261c067eda09777ddf23fd7 Mon Sep 17 00:00:00 2001 From: Pedro Romano Date: Wed, 1 Oct 2014 10:41:17 +0100 Subject: [PATCH 177/200] GCE libcloud 0.15 support and code cleanup * Code formatting (indentation and white space) fixes for improved PEP8 conformity. * Remove redundant backslashes inside parentheses. * Test for object identity should be 'is not None'. * Test for membership should be 'not in'. * Fit docstring to the PEP8 79 character limit. * Use forward compatible Python 2.6+ 'except .. as' syntax for exception handling. * Support libcloud > 0.15 'metadata' argument format. 
--- cloud/google/gce.py | 137 ++++++++++++++++++++++++++------------------ 1 file changed, 80 insertions(+), 57 deletions(-) diff --git a/cloud/google/gce.py b/cloud/google/gce.py index 1de351a12fb..d3c60fcec34 100644 --- a/cloud/google/gce.py +++ b/cloud/google/gce.py @@ -44,7 +44,8 @@ options: default: "n1-standard-1" metadata: description: - - a hash/dictionary of custom data for the instance; '{"key":"value",...}' + - a hash/dictionary of custom data for the instance; + '{"key": "value", ...}' required: false default: null service_account_email: @@ -56,10 +57,17 @@ options: service_account_permissions: version_added: "2.0" description: - - service account permissions (see U(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create), --scopes section for detailed information) + - service account permissions (see + U(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create), + --scopes section for detailed information) required: false default: null - choices: ["bigquery", "cloud-platform", "compute-ro", "compute-rw", "computeaccounts-ro", "computeaccounts-rw", "datastore", "logging-write", "monitoring", "sql", "sql-admin", "storage-full", "storage-ro", "storage-rw", "taskqueue", "userinfo-email"] + choices: [ + "bigquery", "cloud-platform", "compute-ro", "compute-rw", + "computeaccounts-ro", "computeaccounts-rw", "datastore", "logging-write", + "monitoring", "sql", "sql-admin", "storage-full", "storage-ro", + "storage-rw", "taskqueue", "userinfo-email" + ] pem_file: version_added: "1.5.1" description: @@ -88,7 +96,10 @@ options: default: "false" disks: description: - - a list of persistent disks to attach to the instance; a string value gives the name of the disk; alternatively, a dictionary value can define 'name' and 'mode' ('READ_ONLY' or 'READ_WRITE'). The first entry will be the boot disk (which must be READ_WRITE). 
+ - a list of persistent disks to attach to the instance; a string value + gives the name of the disk; alternatively, a dictionary value can + define 'name' and 'mode' ('READ_ONLY' or 'READ_WRITE'). The first entry + will be the boot disk (which must be READ_WRITE). required: false default: null version_added: "1.7" @@ -111,7 +122,8 @@ options: ip_forward: version_added: "1.9" description: - - set to true if the instance can forward ip packets (useful for gateways) + - set to true if the instance can forward ip packets (useful for + gateways) required: false default: "false" external_ip: @@ -167,7 +179,8 @@ EXAMPLES = ''' tasks: - name: Launch instances local_action: gce instance_names={{names}} machine_type={{machine_type}} - image={{image}} zone={{zone}} service_account_email={{ service_account_email }} + image={{image}} zone={{zone}} + service_account_email={{ service_account_email }} pem_file={{ pem_file }} project_id={{ project_id }} register: gce - name: Wait for SSH to come up @@ -195,10 +208,11 @@ EXAMPLES = ''' ''' try: + import libcloud from libcloud.compute.types import Provider from libcloud.compute.providers import get_driver from libcloud.common.google import GoogleBaseError, QuotaExceededError, \ - ResourceExistsError, ResourceInUseError, ResourceNotFoundError + ResourceExistsError, ResourceInUseError, ResourceNotFoundError _ = Provider.GCE HAS_LIBCLOUD = True except ImportError: @@ -239,7 +253,7 @@ def get_instance_info(inst): public_ip = inst.public_ips[0] return({ - 'image': not inst.image is None and inst.image.split('/')[-1] or None, + 'image': inst.image is not None and inst.image.split('/')[-1] or None, 'disks': disk_names, 'machine_type': inst.size, 'metadata': metadata, @@ -250,7 +264,8 @@ def get_instance_info(inst): 'status': ('status' in inst.extra) and inst.extra['status'] or None, 'tags': ('tags' in inst.extra) and inst.extra['tags'] or [], 'zone': ('zone' in inst.extra) and inst.extra['zone'].name or None, - }) + }) + def 
create_instances(module, gce, instance_names): """Creates new instances. Attributes other than instance_names are picked @@ -308,25 +323,31 @@ def create_instances(module, gce, instance_names): # with: # [ {'key': key1, 'value': value1}, {'key': key2, 'value': value2}, ...] if metadata: - try: - md = literal_eval(str(metadata)) - if not isinstance(md, dict): - raise ValueError('metadata must be a dict') - except ValueError, e: - module.fail_json(msg='bad metadata: %s' % str(e)) - except SyntaxError, e: - module.fail_json(msg='bad metadata syntax') - + if isinstance(metadata, dict): + md = metadata + else: + try: + md = literal_eval(str(metadata)) + if not isinstance(md, dict): + raise ValueError('metadata must be a dict') + except ValueError as e: + module.fail_json(msg='bad metadata: %s' % str(e)) + except SyntaxError as e: + module.fail_json(msg='bad metadata syntax') + + if hasattr(libcloud, '__version__') and libcloud.__version__ < '0.15': items = [] - for k,v in md.items(): - items.append({"key": k,"value": v}) + for k, v in md.items(): + items.append({"key": k, "value": v}) metadata = {'items': items} + else: + metadata = md ex_sa_perms = [] bad_perms = [] if service_account_permissions: for perm in service_account_permissions: - if not perm in gce.SA_SCOPES_MAP.keys(): + if perm not in gce.SA_SCOPES_MAP.keys(): bad_perms.append(perm) if len(bad_perms) > 0: module.fail_json(msg='bad permissions: %s' % str(bad_perms)) @@ -339,7 +360,7 @@ def create_instances(module, gce, instance_names): # These variables all have default values but check just in case if not lc_image or not lc_network or not lc_machine_type or not lc_zone: module.fail_json(msg='Missing required create instance variable', - changed=False) + changed=False) for name in instance_names: pd = None @@ -352,16 +373,19 @@ def create_instances(module, gce, instance_names): pd = gce.ex_get_volume("%s" % name, lc_zone) inst = None try: - inst = gce.create_node(name, lc_machine_type, lc_image, - 
location=lc_zone, ex_network=network, ex_tags=tags, - ex_metadata=metadata, ex_boot_disk=pd, ex_can_ip_forward=ip_forward, - external_ip=external_ip, ex_disk_auto_delete=disk_auto_delete, ex_service_accounts=ex_sa_perms) + inst = gce.create_node( + name, lc_machine_type, lc_image, location=lc_zone, + ex_network=network, ex_tags=tags, ex_metadata=metadata, + ex_boot_disk=pd, ex_can_ip_forward=ip_forward, + external_ip=external_ip, ex_disk_auto_delete=disk_auto_delete, + ex_service_accounts=ex_sa_perms + ) changed = True except ResourceExistsError: inst = gce.ex_get_node(name, lc_zone) - except GoogleBaseError, e: - module.fail_json(msg='Unexpected error attempting to create ' + \ - 'instance %s, error: %s' % (name, e.value)) + except GoogleBaseError as e: + module.fail_json(msg='Unexpected error attempting to create ' + + 'instance %s, error: %s' % (name, e.value)) for i, lc_disk in enumerate(lc_disks): # Check whether the disk is already attached @@ -417,7 +441,7 @@ def terminate_instances(module, gce, instance_names, zone_name): inst = gce.ex_get_node(name, zone_name) except ResourceNotFoundError: pass - except Exception, e: + except Exception as e: module.fail_json(msg=unexpected_error_msg(e), changed=False) if inst: gce.destroy_node(inst) @@ -429,27 +453,27 @@ def terminate_instances(module, gce, instance_names, zone_name): def main(): module = AnsibleModule( - argument_spec = dict( - image = dict(default='debian-7'), - instance_names = dict(), - machine_type = dict(default='n1-standard-1'), - metadata = dict(), - name = dict(), - network = dict(default='default'), - persistent_boot_disk = dict(type='bool', default=False), - disks = dict(type='list'), - state = dict(choices=['active', 'present', 'absent', 'deleted'], - default='present'), - tags = dict(type='list'), - zone = dict(default='us-central1-a'), - service_account_email = dict(), - service_account_permissions = dict(type='list'), - pem_file = dict(), - project_id = dict(), - ip_forward = 
dict(type='bool', default=False), - external_ip = dict(choices=['ephemeral', 'none'], - default='ephemeral'), - disk_auto_delete = dict(type='bool', default=True), + argument_spec=dict( + image=dict(default='debian-7'), + instance_names=dict(), + machine_type=dict(default='n1-standard-1'), + metadata=dict(), + name=dict(), + network=dict(default='default'), + persistent_boot_disk=dict(type='bool', default=False), + disks=dict(type='list'), + state=dict(choices=['active', 'present', 'absent', 'deleted'], + default='present'), + tags=dict(type='list'), + zone=dict(default='us-central1-a'), + service_account_email=dict(), + service_account_permissions=dict(type='list'), + pem_file=dict(), + project_id=dict(), + ip_forward=dict(type='bool', default=False), + external_ip=dict(choices=['ephemeral', 'none'], + default='ephemeral'), + disk_auto_delete=dict(type='bool', default=True), ) ) @@ -482,15 +506,15 @@ def main(): inames.append(name) if not inames: module.fail_json(msg='Must specify a "name" or "instance_names"', - changed=False) + changed=False) if not zone: module.fail_json(msg='Must specify a "zone"', changed=False) json_output = {'zone': zone} if state in ['absent', 'deleted']: json_output['state'] = 'absent' - (changed, terminated_instance_names) = terminate_instances(module, - gce, inames, zone) + (changed, terminated_instance_names) = terminate_instances( + module, gce, inames, zone) # based on what user specified, return the same variable, although # value could be different if an instance could not be destroyed @@ -501,15 +525,14 @@ def main(): elif state in ['active', 'present']: json_output['state'] = 'present' - (changed, instance_data,instance_name_list) = create_instances( - module, gce, inames) + (changed, instance_data, instance_name_list) = create_instances( + module, gce, inames) json_output['instance_data'] = instance_data if instance_names: json_output['instance_names'] = instance_name_list elif name: json_output['name'] = name - 
json_output['changed'] = changed module.exit_json(**json_output) From 8fe5d6f7ef7f17f1ec54a44570daf28ebd85f208 Mon Sep 17 00:00:00 2001 From: Joe Quadrino Date: Tue, 3 Nov 2015 09:11:18 -0500 Subject: [PATCH 178/200] add devices parameter for docker module --- cloud/docker/docker.py | 39 ++++++++++++++++++++++++++++++++++++--- 1 file changed, 36 insertions(+), 3 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 4a3fb238603..a7f924ae289 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -95,6 +95,11 @@ options: - 'alias. Use docker CLI-style syntax: C(redis:myredis).' default: null version_added: "1.5" + devices: + description: + - List of host devices to expose to container + default: null + required: false log_driver: description: - You can specify a different logging driver for the container than for the daemon. @@ -386,6 +391,8 @@ EXAMPLES = ''' # stopped and removed, and a new one will be launched in its place. # - link this container to the existing redis container launched above with # an alias. +# - grant the container read write permissions for the host's /dev/sda device +# through a node named /dev/xvda # - bind TCP port 9000 within the container to port 8080 on all interfaces # on the host. 
# - bind UDP port 9001 within the container to port 8081 on the host, only @@ -400,6 +407,8 @@ EXAMPLES = ''' pull: always links: - "myredis:aliasedredis" + devices: + - "/dev/sda:/dev/xvda:rwm" ports: - "8080:9000" - "127.0.0.1:8081:9001/udp" @@ -602,6 +611,7 @@ class DockerManager(object): # docker-py version is a tuple of ints because we have to compare them # server APIVersion is passed to a docker-py function that takes strings _cap_ver_req = { + 'devices': ((0, 7, 0), '1.2'), 'dns': ((0, 3, 0), '1.10'), 'volumes_from': ((0, 3, 0), '1.10'), 'restart_policy': ((0, 5, 0), '1.14'), @@ -839,11 +849,15 @@ class DockerManager(object): } optionals = {} - for optional_param in ('dns', 'volumes_from', 'restart_policy', - 'restart_policy_retry', 'pid', 'extra_hosts', 'log_driver', - 'cap_add', 'cap_drop', 'read_only', 'log_opt'): + for optional_param in ('devices', 'dns', 'volumes_from', + 'restart_policy', 'restart_policy_retry', 'pid', 'extra_hosts', + 'log_driver', 'cap_add', 'cap_drop', 'read_only', 'log_opt'): optionals[optional_param] = self.module.params.get(optional_param) + if optionals['devices'] is not None: + self.ensure_capability('devices') + params['devices'] = optionals['devices'] + if optionals['dns'] is not None: self.ensure_capability('dns') params['dns'] = optionals['dns'] @@ -1299,6 +1313,24 @@ class DockerManager(object): differing.append(container) continue + # DEVICES + + expected_devices = set() + for device in (self.module.params.get('devices') or []): + if len(device.split(':')) == 2: + expected_devices.add(device + ":rwm") + else: + expected_devices.add(device) + + actual_devices = set() + for device in (container['HostConfig']['Devices'] or []): + actual_devices.add("{PathOnHost}:{PathInContainer}:{CgroupPermissions}".format(**device)) + + if actual_devices != expected_devices: + self.reload_reasons.append('devices ({0} => {1})'.format(actual_devices, expected_devices)) + differing.append(container) + continue + # DNS expected_dns = 
set(self.module.params.get('dns') or []) @@ -1667,6 +1699,7 @@ def main(): volumes = dict(default=None, type='list'), volumes_from = dict(default=None), links = dict(default=None, type='list'), + devices = dict(default=None, type='list'), memory_limit = dict(default=0), memory_swap = dict(default=0), docker_url = dict(), From 581b4f6de6eb633ec9df3615629f86df3120fa47 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 18 Dec 2015 16:33:48 -0800 Subject: [PATCH 179/200] Add version_added to documentation --- cloud/docker/docker.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index a7f924ae289..0902aaf92b7 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -100,6 +100,7 @@ options: - List of host devices to expose to container default: null required: false + version_added: "2.1" log_driver: description: - You can specify a different logging driver for the container than for the daemon. @@ -391,7 +392,7 @@ EXAMPLES = ''' # stopped and removed, and a new one will be launched in its place. # - link this container to the existing redis container launched above with # an alias. -# - grant the container read write permissions for the host's /dev/sda device +# - grant the container read write permissions for the host's /dev/sda device # through a node named /dev/xvda # - bind TCP port 9000 within the container to port 8080 on all interfaces # on the host. 
@@ -849,8 +850,8 @@ class DockerManager(object): } optionals = {} - for optional_param in ('devices', 'dns', 'volumes_from', - 'restart_policy', 'restart_policy_retry', 'pid', 'extra_hosts', + for optional_param in ('devices', 'dns', 'volumes_from', + 'restart_policy', 'restart_policy_retry', 'pid', 'extra_hosts', 'log_driver', 'cap_add', 'cap_drop', 'read_only', 'log_opt'): optionals[optional_param] = self.module.params.get(optional_param) @@ -1313,7 +1314,7 @@ class DockerManager(object): differing.append(container) continue - # DEVICES + # DEVICES expected_devices = set() for device in (self.module.params.get('devices') or []): From 19ebc453647d99245a6a37ffd6040193f47baa67 Mon Sep 17 00:00:00 2001 From: Omar Khan Date: Thu, 24 Sep 2015 12:15:59 +0700 Subject: [PATCH 180/200] Add stop_timeout option to docker module --- cloud/docker/docker.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 3727d305b04..f5a1f0db24a 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -341,6 +341,12 @@ options: required: false default: null version_added: "1.9.4" + stop_timeout: + description: + - How many seconds to wait for the container to stop before killing it. 
+ required: false + default: 10 + version_added: "2.0" author: - "Cove Schneider (@cove)" - "Joshua Conner (@joshuaconner)" @@ -626,6 +632,7 @@ class DockerManager(object): 'cap_drop': ((0, 5, 0), '1.14'), 'read_only': ((1, 0, 0), '1.17'), 'labels': ((1, 2, 0), '1.18'), + 'stop_timeout': ((0, 5, 0), '1.0'), # Clientside only 'insecure_registry': ((0, 5, 0), '0.0') } @@ -1542,7 +1549,7 @@ class DockerManager(object): def stop_containers(self, containers): for i in containers: - self.client.stop(i['Id']) + self.client.stop(i['Id'], self.module.params.get('stop_timeout')) self.increment_counter('stopped') return [self.client.wait(i['Id']) for i in containers] @@ -1745,6 +1752,7 @@ def main(): cap_drop = dict(default=None, type='list'), read_only = dict(default=None, type='bool'), labels = dict(default={}, type='dict'), + stop_timeout = dict(default=10, type='int'), ), required_together = ( ['tls_client_cert', 'tls_client_key'], From ba3ee25172ef09de5b187dc56653945a843fd668 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 18 Dec 2015 17:57:17 -0800 Subject: [PATCH 181/200] labels is actually not added in 1.9.4 but in 2.1 --- cloud/docker/docker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index f5a1f0db24a..b95c7ba5706 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -340,7 +340,7 @@ options: - Set container labels. Requires docker >= 1.6 and docker-py >= 1.2.0. required: false default: null - version_added: "1.9.4" + version_added: "2.1" stop_timeout: description: - How many seconds to wait for the container to stop before killing it. 
From 15c1c0cca79196d4dde630db2a7eee90367051cc Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 18 Dec 2015 21:28:01 -0800 Subject: [PATCH 182/200] entrypoint feature added in 2.1 --- cloud/docker/docker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index fe1ef4707d1..86402bee773 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -53,7 +53,7 @@ options: Used to match and launch containers. default: null required: false - version_added: "2.0" + version_added: "2.1" command: description: - Command used to match and launch containers. From fcb3397df7944ff15ea698b5717c06e8fc7d43ba Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 18 Dec 2015 22:22:55 -0800 Subject: [PATCH 183/200] Fix up documentation --- cloud/google/gce.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/google/gce.py b/cloud/google/gce.py index d3c60fcec34..fcaa3b85023 100644 --- a/cloud/google/gce.py +++ b/cloud/google/gce.py @@ -45,7 +45,7 @@ options: metadata: description: - a hash/dictionary of custom data for the instance; - '{"key": "value", ...}' + '{"key":"value", ...}' required: false default: null service_account_email: From 3281cad95e6cf4fa2f213c641102bf4d7bc16b2f Mon Sep 17 00:00:00 2001 From: "T.Kuramochi" Date: Fri, 28 Aug 2015 14:46:08 +0900 Subject: [PATCH 184/200] Add HTTP Proxy options Update a document file for win_get_url.ps1. Update add a prefix proxy_ for this variables Update a document file for win_get_url.ps1. 
Update win_get_url.ps1 20150907 --- windows/win_get_url.ps1 | 12 ++++++++++++ windows/win_get_url.py | 32 ++++++++++++++++++++++++++++++++ 2 files changed, 44 insertions(+) diff --git a/windows/win_get_url.ps1 b/windows/win_get_url.ps1 index a83ad2633b0..b7b1a1ed445 100644 --- a/windows/win_get_url.ps1 +++ b/windows/win_get_url.ps1 @@ -44,6 +44,10 @@ $skip_certificate_validation = Get-Attr $params "skip_certificate_validation" $f $username = Get-Attr $params "username" $password = Get-Attr $params "password" +$proxy_url = $params.proxy_url +$proxy_username = $params.proxy_username +$proxy_password = $params.proxy_password + if($skip_certificate_validation){ [System.Net.ServicePointManager]::ServerCertificateValidationCallback = {$true} } @@ -52,6 +56,14 @@ $force = Get-Attr -obj $params -name "force" "yes" | ConvertTo-Bool If ($force -or -not (Test-Path $dest)) { $client = New-Object System.Net.WebClient + if($params.proxy_url) { + $proxy_url = $params.proxy_url + if($proxy_username -and $proxy_password){ + $proxy_credential = New-Object System.Net.NetworkCredential($proxy_username, $proxy_password) + $proxy_server.Credentials = $proxy_credential + } + $client.Proxy = $proxy_server + } if($username -and $password){ $client.Credentials = New-Object System.Net.NetworkCredential($username, $password) diff --git a/windows/win_get_url.py b/windows/win_get_url.py index 5c3e994d418..cfe93982f61 100644 --- a/windows/win_get_url.py +++ b/windows/win_get_url.py @@ -65,6 +65,30 @@ options: - Skip SSL certificate validation if true required: false default: false + proxy_url: + description: + - The full URL of the proxy server a file to download through it. + version_added: "2.0" + required: false + choices: null + default: null + proxy_username: + description: + - Name of the user for authorization of the proxy server. 
+ version_added: "2.0" + required: false + choices: null + default: null + proxy_password: + description: + - Password of the user for authorization of the proxy server. + version_added: "2.0" + required: false + choices: null + default: null +author: + - "Paul Durivage (@angstwad)" + - "Takeshi Kuramochi (tksarah)" ''' EXAMPLES = ''' @@ -83,4 +107,12 @@ $ ansible -i hosts -c winrm -m win_get_url -a "url=http://www.example.com/earthr url: 'http://www.example.com/earthrise.jpg' dest: 'C:\Users\RandomUser\earthrise.jpg' force: no + +- name: Download earthrise.jpg to 'C:\Users\RandomUser\earthrise.jpg' through the proxy server. + win_get_url: + url: 'http://www.example.com/earthrise.jpg' + dest: 'C:\Users\RandomUser\earthrise.jpg' + proxy_url: 'http://10.0.0.1:8080' + proxy_username: 'username' + proxy_password: 'password' ''' From c9fe542e8c1a2833fa640beb0c7595dcf4c4f0f8 Mon Sep 17 00:00:00 2001 From: nitzmahone Date: Sat, 19 Dec 2015 00:19:16 -0800 Subject: [PATCH 185/200] minor win_get_url doc update --- windows/win_get_url.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/windows/win_get_url.py b/windows/win_get_url.py index cfe93982f61..1e2031170e1 100644 --- a/windows/win_get_url.py +++ b/windows/win_get_url.py @@ -67,21 +67,21 @@ options: default: false proxy_url: description: - - The full URL of the proxy server a file to download through it. + - The full URL of the proxy server to download through. version_added: "2.0" required: false choices: null default: null proxy_username: description: - - Name of the user for authorization of the proxy server. + - Proxy authentication username version_added: "2.0" required: false choices: null default: null proxy_password: description: - - Password of the user for authorization of the proxy server. 
+ - Proxy authentication password version_added: "2.0" required: false choices: null @@ -108,7 +108,7 @@ $ ansible -i hosts -c winrm -m win_get_url -a "url=http://www.example.com/earthr dest: 'C:\Users\RandomUser\earthrise.jpg' force: no -- name: Download earthrise.jpg to 'C:\Users\RandomUser\earthrise.jpg' through the proxy server. +- name: Download earthrise.jpg to 'C:\Users\RandomUser\earthrise.jpg' through a proxy server. win_get_url: url: 'http://www.example.com/earthrise.jpg' dest: 'C:\Users\RandomUser\earthrise.jpg' From 11f4340a441547e09248e116324fa16d16f12caa Mon Sep 17 00:00:00 2001 From: nitzmahone Date: Sat, 19 Dec 2015 01:11:35 -0800 Subject: [PATCH 186/200] win_get_url doc/strict-mode fixes plus cleaning up from bad merge --- windows/win_get_url.ps1 | 10 +++++----- windows/win_get_url.py | 6 ------ 2 files changed, 5 insertions(+), 11 deletions(-) diff --git a/windows/win_get_url.ps1 b/windows/win_get_url.ps1 index b7b1a1ed445..71a4d5751af 100644 --- a/windows/win_get_url.ps1 +++ b/windows/win_get_url.ps1 @@ -44,9 +44,9 @@ $skip_certificate_validation = Get-Attr $params "skip_certificate_validation" $f $username = Get-Attr $params "username" $password = Get-Attr $params "password" -$proxy_url = $params.proxy_url -$proxy_username = $params.proxy_username -$proxy_password = $params.proxy_password +$proxy_url = Get-Attr $params "proxy_url" +$proxy_username = Get-Attr $params "proxy_username" +$proxy_password = Get-Attr $params "proxy_password" if($skip_certificate_validation){ [System.Net.ServicePointManager]::ServerCertificateValidationCallback = {$true} @@ -56,8 +56,8 @@ $force = Get-Attr -obj $params -name "force" "yes" | ConvertTo-Bool If ($force -or -not (Test-Path $dest)) { $client = New-Object System.Net.WebClient - if($params.proxy_url) { - $proxy_url = $params.proxy_url + if($proxy_url) { + $proxy_server = New-Object System.Net.WebProxy($proxy_url, $true) if($proxy_username -and $proxy_password){ $proxy_credential = New-Object 
System.Net.NetworkCredential($proxy_username, $proxy_password) $proxy_server.Credentials = $proxy_credential diff --git a/windows/win_get_url.py b/windows/win_get_url.py index 1e2031170e1..26b0dc7b012 100644 --- a/windows/win_get_url.py +++ b/windows/win_get_url.py @@ -70,22 +70,16 @@ options: - The full URL of the proxy server to download through. version_added: "2.0" required: false - choices: null - default: null proxy_username: description: - Proxy authentication username version_added: "2.0" required: false - choices: null - default: null proxy_password: description: - Proxy authentication password version_added: "2.0" required: false - choices: null - default: null author: - "Paul Durivage (@angstwad)" - "Takeshi Kuramochi (tksarah)" From 6e9adc46870816eb08f686537612ab1e319da504 Mon Sep 17 00:00:00 2001 From: Sam Thursfield Date: Fri, 29 May 2015 16:59:12 +0000 Subject: [PATCH 187/200] os_server: Add some error checking for the 'nics' parameter If this parameter was not of the right type, the module would fail with a traceback, with a "AttributeError: 'str' object has no attribute 'get'" exception. It now gives a proper error message on type errors. 
--- cloud/openstack/os_server.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/cloud/openstack/os_server.py b/cloud/openstack/os_server.py index f54b150388d..036d4edded7 100644 --- a/cloud/openstack/os_server.py +++ b/cloud/openstack/os_server.py @@ -372,7 +372,14 @@ def _network_args(module, cloud): args = [] nics = module.params['nics'] + if type(nics) != list: + module.fail_json(msg='The \'nics\' parameter must be a list.') + for net in _parse_nics(nics): + if type(net) != dict: + module.fail_json( + msg='Each entry in the \'nics\' parameter must be a dict.') + if net.get('net-id'): args.append(net) elif net.get('net-name'): From 956d1d0700c9e950edbea620a75baaa725bff106 Mon Sep 17 00:00:00 2001 From: Alexander Gubin Date: Tue, 22 Dec 2015 10:07:08 +0100 Subject: [PATCH 188/200] command: Add warnings for dnf and zypper --- commands/command.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/commands/command.py b/commands/command.py index 2bd8f7048ad..f86d2c83253 100644 --- a/commands/command.py +++ b/commands/command.py @@ -140,9 +140,9 @@ def check_command(commandline): 'rmdir': 'state=absent', 'rm': 'state=absent', 'touch': 'state=touch' } commands = { 'git': 'git', 'hg': 'hg', 'curl': 'get_url', 'wget': 'get_url', 'svn': 'subversion', 'service': 'service', - 'mount': 'mount', 'rpm': 'yum', 'yum': 'yum', 'apt-get': 'apt-get', + 'mount': 'mount', 'rpm': 'yum, dnf or zypper', 'yum': 'yum', 'apt-get': 'apt-get', 'tar': 'unarchive', 'unzip': 'unarchive', 'sed': 'template or lineinfile', - 'rsync': 'synchronize' } + 'rsync': 'synchronize', 'dnf': 'dnf', 'zypper': 'zypper' } become = [ 'sudo', 'su', 'pbrun', 'pfexec', 'runas' ] warnings = list() command = os.path.basename(commandline.split()[0]) From 90f8228fb1330d87da32e84d76b6282c471fa5ca Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Tue, 22 Dec 2015 10:42:47 +0100 Subject: [PATCH 189/200] Fix #2629, replace choices=BOOLEANS by type='bool' --- cloud/openstack/os_ironic.py | 11 
+++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/cloud/openstack/os_ironic.py b/cloud/openstack/os_ironic.py index 0ec4366b79f..38f19e71e58 100644 --- a/cloud/openstack/os_ironic.py +++ b/cloud/openstack/os_ironic.py @@ -183,17 +183,12 @@ def _choose_id_value(module): return None -def _is_value_true(value): - true_values = [True, 'yes', 'Yes', 'True', 'true'] - if value in true_values: - return True - return False def _choose_if_password_only(module, patch): if len(patch) is 1: - if 'password' in patch[0]['path'] and _is_value_true( - module.params['skip_update_of_masked_password']): + if 'password' in patch[0]['path'] and + module.params['skip_update_of_masked_password']: # Return false to aabort update as the password appears # to be the only element in the patch. return False @@ -219,7 +214,7 @@ def main(): properties=dict(type='dict', default={}), ironic_url=dict(required=False), chassis_uuid=dict(required=False), - skip_update_of_masked_password=dict(required=False, choices=BOOLEANS), + skip_update_of_masked_password=dict(required=False, type='bool'), state=dict(required=False, default='present') ) module_kwargs = openstack_module_kwargs() From 860635d38c2dada0a5e4b3974507b1d1ede315a1 Mon Sep 17 00:00:00 2001 From: Michael Scherer Date: Tue, 22 Dec 2015 16:20:37 +0100 Subject: [PATCH 190/200] Add more precise documentation on the requirements parameter Fix #427 --- packaging/language/pip.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/packaging/language/pip.py b/packaging/language/pip.py index 6d325282770..d896c5b9ed5 100755 --- a/packaging/language/pip.py +++ b/packaging/language/pip.py @@ -44,7 +44,8 @@ options: default: null requirements: description: - - The path to a pip requirements file + - The path to a pip requirements file, which should be local to the remote system. + File can be specified as a relative path if using the chdir option. 
required: false default: null virtualenv: From 5e91c7b4d4b10671f0a4e49b627ecd7a005dd6f1 Mon Sep 17 00:00:00 2001 From: Charles Paul Date: Tue, 22 Dec 2015 10:01:01 -0600 Subject: [PATCH 191/200] adding no_log to password --- cloud/vmware/vsphere_guest.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index f71ce66264a..1b5a6518211 100644 --- a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -1535,7 +1535,7 @@ def main(): argument_spec=dict( vcenter_hostname=dict(required=True, type='str'), username=dict(required=True, type='str'), - password=dict(required=True, type='str'), + password=dict(required=True, type='str', no_log=True), state=dict( required=False, choices=[ From 907b7f7cf4c97a720391a82eb65c2705a259614e Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 22 Dec 2015 11:08:43 -0500 Subject: [PATCH 192/200] fix bad linebreak --- cloud/openstack/os_ironic.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/cloud/openstack/os_ironic.py b/cloud/openstack/os_ironic.py index 38f19e71e58..a6612cf195e 100644 --- a/cloud/openstack/os_ironic.py +++ b/cloud/openstack/os_ironic.py @@ -187,8 +187,7 @@ def _choose_id_value(module): def _choose_if_password_only(module, patch): if len(patch) is 1: - if 'password' in patch[0]['path'] and - module.params['skip_update_of_masked_password']: + if 'password' in patch[0]['path'] and module.params['skip_update_of_masked_password']: # Return false to aabort update as the password appears # to be the only element in the patch. 
return False From d4c5360380e8c8236b60445e531cc2b2040ddaf2 Mon Sep 17 00:00:00 2001 From: Barnaby Court Date: Tue, 22 Dec 2015 22:26:51 -0500 Subject: [PATCH 193/200] add no_log to the password parameter --- packaging/os/redhat_subscription.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/os/redhat_subscription.py b/packaging/os/redhat_subscription.py index 2e6887164cf..19f2ce7cf37 100644 --- a/packaging/os/redhat_subscription.py +++ b/packaging/os/redhat_subscription.py @@ -388,7 +388,7 @@ def main(): argument_spec = dict( state = dict(default='present', choices=['present', 'absent']), username = dict(default=None, required=False), - password = dict(default=None, required=False), + password = dict(default=None, required=False, no_log=True), server_hostname = dict(default=rhn.config.get_option('server.hostname'), required=False), server_insecure = dict(default=rhn.config.get_option('server.insecure'), required=False), rhsm_baseurl = dict(default=rhn.config.get_option('rhsm.baseurl'), required=False), From 1fe64796179a73510718ef38ad361e6bafc3febe Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 24 Dec 2015 00:12:50 -0500 Subject: [PATCH 194/200] be smarter when dealing with policy_json input Now module will assume that if the argument is a string it is already formated as json and will only try to convert non strings into json string. Also removed unused 'msg' var declarations and the ifs that set them fixes #2009 --- cloud/amazon/iam_policy.py | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/cloud/amazon/iam_policy.py b/cloud/amazon/iam_policy.py index 44a708c9a66..2054a331c60 100644 --- a/cloud/amazon/iam_policy.py +++ b/cloud/amazon/iam_policy.py @@ -146,9 +146,7 @@ def user_action(module, iam, name, policy_name, skip, pdoc, state): if urllib.unquote(iam.get_user_policy(name, pol). 
get_user_policy_result.policy_document) == pdoc: policy_match = True - if policy_match: - msg=("The policy document you specified already exists " - "under the name %s." % pol) + if state == 'present' and skip: if policy_name not in current_policies and not policy_match: changed = True @@ -189,15 +187,12 @@ def role_action(module, iam, name, policy_name, skip, pdoc, state): module.exit_json(changed=False) else: module.fail_json(msg=e.message) - - try: + + try: for pol in current_policies: if urllib.unquote(iam.get_role_policy(name, pol). get_role_policy_result.policy_document) == pdoc: policy_match = True - if policy_match: - msg=("The policy document you specified already exists " - "under the name %s." % pol) if state == 'present' and skip: if policy_name not in current_policies and not policy_match: changed = True @@ -305,10 +300,12 @@ def main(): pdoc = json.dumps(json.load(json_data)) json_data.close() elif module.params.get('policy_json') != None: - try: - pdoc = json.dumps(module.params.get('policy_json')) - except Exception as e: - module.fail_json(msg=str(e) + '\n' + module.params.get('policy_json')) + # if its a string, assume it is already JSON + if not isinstance(pdoc, basestring): + try: + pdoc = json.dumps(module.params.get('policy_json')) + except Exception as e: + module.fail_json(msg='Failed to convert the policy into valid JSON: %s' % str(e)) else: pdoc=None From 410ee327705326db54b1e4d6b8829164a5135ccd Mon Sep 17 00:00:00 2001 From: Ben Visser Date: Fri, 25 Dec 2015 23:14:04 -0700 Subject: [PATCH 195/200] staging that num_nodes is required when state=present --- cloud/amazon/elasticache.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/elasticache.py b/cloud/amazon/elasticache.py index a22bea70d72..590dd987018 100644 --- a/cloud/amazon/elasticache.py +++ b/cloud/amazon/elasticache.py @@ -50,7 +50,7 @@ options: default: cache.m1.small num_nodes: description: - - The initial number of cache nodes that the cache 
cluster will have + - The initial number of cache nodes that the cache cluster will have. Required when state=present. required: false cache_port: description: From 0f21a9316d8f4726e141f841a31c6ccd90512eea Mon Sep 17 00:00:00 2001 From: Dmitry Labutin Date: Tue, 29 Dec 2015 09:58:38 +0300 Subject: [PATCH 196/200] Github uses both HTTP and HTTPS protocols --- source_control/git.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source_control/git.py b/source_control/git.py index bdc87b034a2..e4e3f3945d8 100644 --- a/source_control/git.py +++ b/source_control/git.py @@ -33,7 +33,7 @@ options: required: true aliases: [ name ] description: - - git, SSH, or HTTP protocol address of the git repository. + - git, SSH, or HTTP(S) protocol address of the git repository. dest: required: true description: From 33014c6db1ce757d0ffa065e6c9924ac4db1cacc Mon Sep 17 00:00:00 2001 From: nitzmahone Date: Mon, 4 Jan 2016 16:05:43 -0800 Subject: [PATCH 197/200] fix win_file to stop on errors --- windows/win_file.ps1 | 2 ++ 1 file changed, 2 insertions(+) diff --git a/windows/win_file.ps1 b/windows/win_file.ps1 index f387780123c..958f9f04fcc 100644 --- a/windows/win_file.ps1 +++ b/windows/win_file.ps1 @@ -17,6 +17,8 @@ # WANT_JSON # POWERSHELL_COMMON +$ErrorActionPreference = "Stop" + $params = Parse-Args $args # path From 48bd80c9cbf0bf9b0b81bdeb158b7f6338b2a11a Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 5 Jan 2016 14:28:39 -0500 Subject: [PATCH 198/200] adds diff info for file info --- files/file.py | 42 ++++++++++++++++++++++++++++-------------- 1 file changed, 28 insertions(+), 14 deletions(-) diff --git a/files/file.py b/files/file.py index 428565579b8..4fbf8cfa46d 100644 --- a/files/file.py +++ b/files/file.py @@ -190,6 +190,7 @@ def main(): prev_state = get_state(path) + # state should default to file, but since that creates many conflicts, # default to 'current' when it exists. 
if state is None: @@ -226,10 +227,23 @@ def main(): module.fail_json(path=path, msg="recurse option requires state to be 'directory'") file_args = module.load_file_common_arguments(params) + changed = False + diff = {'before': + {'path': path} + , + 'after': + {'path': path} + } + + state_change = False + if prev_state != state: + diff['before']['state'] = prev_state + diff['after']['state'] = state + state_change = True if state == 'absent': - if state != prev_state: + if state_change: if not module.check_mode: if prev_state == 'directory': try: @@ -241,13 +255,13 @@ def main(): os.unlink(path) except Exception, e: module.fail_json(path=path, msg="unlinking failed: %s " % str(e)) - module.exit_json(path=path, changed=True) + module.exit_json(path=path, changed=True, diff=diff) else: module.exit_json(path=path, changed=False) elif state == 'file': - if state != prev_state: + if state_change: if follow and prev_state == 'link': # follow symlink and operate on original path = os.path.realpath(path) @@ -258,8 +272,8 @@ def main(): # file is not absent and any other state is a conflict module.fail_json(path=path, msg='file (%s) is %s, cannot continue' % (path, prev_state)) - changed = module.set_fs_attributes_if_different(file_args, changed) - module.exit_json(path=path, changed=changed) + changed = module.set_fs_attributes_if_different(file_args, changed, diff) + module.exit_json(path=path, changed=changed, diff=diff) elif state == 'directory': if follow and prev_state == 'link': @@ -268,7 +282,7 @@ def main(): if prev_state == 'absent': if module.check_mode: - module.exit_json(changed=True) + module.exit_json(changed=True, diff=diff) changed = True curpath = '' @@ -292,7 +306,7 @@ def main(): raise tmp_file_args = file_args.copy() tmp_file_args['path']=curpath - changed = module.set_fs_attributes_if_different(tmp_file_args, changed) + changed = module.set_fs_attributes_if_different(tmp_file_args, changed, diff) except Exception, e: module.fail_json(path=path, 
msg='There was an issue creating %s as requested: %s' % (curpath, str(e))) @@ -300,12 +314,12 @@ def main(): elif prev_state != 'directory': module.fail_json(path=path, msg='%s already exists as a %s' % (path, prev_state)) - changed = module.set_fs_attributes_if_different(file_args, changed) + changed = module.set_fs_attributes_if_different(file_args, changed, diff) if recurse: changed |= recursive_set_attributes(module, file_args['path'], follow, file_args) - module.exit_json(path=path, changed=changed) + module.exit_json(path=path, changed=changed, diff=diff) elif state in ['link','hard']: @@ -374,10 +388,10 @@ def main(): module.fail_json(path=path, msg='Error while linking: %s' % str(e)) if module.check_mode and not os.path.exists(path): - module.exit_json(dest=path, src=src, changed=changed) + module.exit_json(dest=path, src=src, changed=changed, diff=diff) - changed = module.set_fs_attributes_if_different(file_args, changed) - module.exit_json(dest=path, src=src, changed=changed) + changed = module.set_fs_attributes_if_different(file_args, changed, diff) + module.exit_json(dest=path, src=src, changed=changed, diff=diff) elif state == 'touch': if not module.check_mode: @@ -395,7 +409,7 @@ def main(): else: module.fail_json(msg='Cannot touch other than files, directories, and hardlinks (%s is %s)' % (path, prev_state)) try: - module.set_fs_attributes_if_different(file_args, True) + module.set_fs_attributes_if_different(file_args, True, diff) except SystemExit, e: if e.code: # We take this to mean that fail_json() was called from @@ -405,7 +419,7 @@ def main(): os.remove(path) raise e - module.exit_json(dest=path, changed=True) + module.exit_json(dest=path, changed=True, diff=diff) module.fail_json(path=path, msg='unexpected position reached') From 92ec54c9b36c6289dc311867958f418bcd77f449 Mon Sep 17 00:00:00 2001 From: Jonathan Mainguy Date: Tue, 5 Jan 2016 15:53:21 -0500 Subject: [PATCH 199/200] Fix bad merge --- database/mysql/mysql_user.py | 23 
----------------------- 1 file changed, 23 deletions(-) diff --git a/database/mysql/mysql_user.py b/database/mysql/mysql_user.py index 51f6e9ea1d4..3ca952ca08e 100644 --- a/database/mysql/mysql_user.py +++ b/database/mysql/mysql_user.py @@ -267,29 +267,6 @@ def user_mod(cursor, user, host, host_all, password, encrypted, new_priv, append cursor.execute("ALTER USER %s@%s IDENTIFIED BY %s", (user, host, password)) changed = True - - # Handle privileges - if new_priv is not None: - curr_priv = privileges_get(cursor, user,host) - - # If the user has privileges on a db.table that doesn't appear at all in - # the new specification, then revoke all privileges on it. - for db_table, priv in curr_priv.iteritems(): - # If the user has the GRANT OPTION on a db.table, revoke it first. - if "GRANT" in priv: - grant_option = True - if db_table not in new_priv: - if user != "root" and "PROXY" not in priv and not append_privs: - privileges_revoke(cursor, user,host,db_table,priv,grant_option) - changed = True - - # If the user doesn't currently have any privileges on a db.table, then - # we can perform a straight grant operation. - for db_table, priv in new_priv.iteritems(): - if db_table not in curr_priv: - privileges_grant(cursor, user,host,db_table,priv) - changed = True - # Handle privileges if new_priv is not None: curr_priv = privileges_get(cursor, user,host) From 14950824c3d0bb09b7e09ff137ac26fb25d69f5a Mon Sep 17 00:00:00 2001 From: Arata Notsu Date: Wed, 6 Jan 2016 11:54:57 +0900 Subject: [PATCH 200/200] apt: export env vars before run dpkg Without this change, some trouble may occur when "deb" parameter is used as env vars controlling dpkg are not set. For example, installing a package that requires user input will never end since DEBIAN_FRONTEND=noninteractive is not set. So export env vars in APT_ENV_VARS before run dpkg, like in cases using apt-get/aptitude. 
--- packaging/os/apt.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/packaging/os/apt.py b/packaging/os/apt.py index 3fe9c62c07d..84a019ae203 100755 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -443,6 +443,9 @@ def install_deb(m, debs, cache, force, install_recommends, dpkg_options): if force: options += " --force-all" + for (k,v) in APT_ENV_VARS.iteritems(): + os.environ[k] = v + cmd = "dpkg %s -i %s" % (options, " ".join(pkgs_to_install)) rc, out, err = m.run_command(cmd) if "stdout" in retvals: