From fe5d90f27f5a8af84304324a89e21552d82abfdb Mon Sep 17 00:00:00 2001 From: Rowan Wookey Date: Mon, 16 Mar 2015 17:53:35 +0000 Subject: [PATCH 01/86] Fixes #542 error when ec2_asg arguments aren't specified If max_size/min_size/desired_capacity are omitted when updating an autoscaling group use the existing values --- cloud/amazon/ec2_asg.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/cloud/amazon/ec2_asg.py b/cloud/amazon/ec2_asg.py index 6e5d3508cb8..9c98b282aef 100644 --- a/cloud/amazon/ec2_asg.py +++ b/cloud/amazon/ec2_asg.py @@ -47,15 +47,15 @@ options: required: false min_size: description: - - Minimum number of instances in group + - Minimum number of instances in group, if unspecified then the current group value will be used. required: false max_size: description: - - Maximum number of instances in group + - Maximum number of instances in group, if unspecified then the current group value will be used. required: false desired_capacity: description: - - Desired number of instances in group + - Desired number of instances in group, if unspecified then the current group value will be used. 
required: false replace_all_instances: description: @@ -449,6 +449,13 @@ def replace(connection, module): changed = False return(changed, props) + #check if min_size/max_size/desired capacity have been specified and if not use ASG values + if min_size is None: + min_size = as_group.min_size + if max_size is None: + max_size = as_group.max_size + if desired_capacity is None: + desired_capacity = as_group.desired_capacity # set temporary settings and wait for them to be reached as_group.max_size = max_size + batch_size as_group.min_size = min_size + batch_size From 8cb4b7b01c7a45512105a9e0a30654219facf725 Mon Sep 17 00:00:00 2001 From: Matt Ferrante Date: Wed, 17 Dec 2014 11:30:05 -0500 Subject: [PATCH 02/86] ec2_ami can update an ami's launch_permissions --- cloud/amazon/ec2_ami.py | 69 ++++++++++++++++++++++++++++++++++++++++- 1 file changed, 68 insertions(+), 1 deletion(-) diff --git a/cloud/amazon/ec2_ami.py b/cloud/amazon/ec2_ami.py index 0d504ee3b0c..a8fad2d1b3d 100644 --- a/cloud/amazon/ec2_ami.py +++ b/cloud/amazon/ec2_ami.py @@ -86,6 +86,12 @@ options: required: false default: null version_added: "2.0" + launch_permissions: + description: + - Users and groups that should be able to launch the ami. Expects dictionary with a key of user_ids and/or group_names. user_ids should be a list of account ids. group_name should be a list of groups, "all" is the only acceptable value currently. 
+ required: false + default: null + aliases: [] author: "Evan Duffield (@scicoin-project) " extends_documentation_fragment: aws @@ -151,6 +157,25 @@ EXAMPLES = ''' delete_snapshot: False state: absent +# Update AMI Launch Permissions, making it public +- ec2_ami: + aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx + aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + region: xxxxxx + image_id: "{{ instance.image_id }}" + state: present + launch_permissions: + group_names: ['all'] + +# Allow AMI to be launched by another account +- ec2_ami: + aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx + aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + region: xxxxxx + image_id: "{{ instance.image_id }}" + state: present + launch_permissions: + user_ids: ['123456789012'] ''' import sys @@ -181,6 +206,7 @@ def create_image(module, ec2): no_reboot = module.params.get('no_reboot') device_mapping = module.params.get('device_mapping') tags = module.params.get('tags') + launch_permissions = module.params.get('launch_permissions') try: params = {'instance_id': instance_id, @@ -241,6 +267,12 @@ def create_image(module, ec2): ec2.create_tags(image_id, tags) except boto.exception.EC2ResponseError, e: module.fail_json(msg = "Image tagging failed => %s: %s" % (e.error_code, e.error_message)) + if launch_permissions: + try: + img = ec2.get_image(image_id) + img.set_launch_permissions(**launch_permissions) + except boto.exception.BotoServerError, e: + module.fail_json(msg="%s: %s" % (e.error_code, e.error_message), image_id=image_id) module.exit_json(msg="AMI creation operation complete", image_id=image_id, state=img.state, changed=True) @@ -281,6 +313,36 @@ def deregister_image(module, ec2): sys.exit(0) +def update_image(module, ec2): + """ + Updates AMI + """ + + image_id = module.params.get('image_id') + launch_permissions = module.params.get('launch_permissions') + if 'user_ids' in launch_permissions: + launch_permissions['user_ids'] = [str(user_id) for user_id in 
launch_permissions['user_ids']] + + img = ec2.get_image(image_id) + if img == None: + module.fail_json(msg = "Image %s does not exist" % image_id, changed=False) + + try: + set_permissions = img.get_launch_permissions() + if set_permissions != launch_permissions: + if ('user_ids' in launch_permissions and launch_permissions['user_ids']) or ('group_names' in launch_permissions and launch_permissions['group_names']): + res = img.set_launch_permissions(**launch_permissions) + elif ('user_ids' in set_permissions and set_permissions['user_ids']) or ('group_names' in set_permissions and set_permissions['group_names']): + res = img.remove_launch_permissions(**set_permissions) + else: + module.exit_json(msg="AMI not updated", launch_permissions=set_permissions, changed=False) + module.exit_json(msg="AMI launch permissions updated", launch_permissions=launch_permissions, set_perms=set_permissions, changed=True) + else: + module.exit_json(msg="AMI not updated", launch_permissions=set_permissions, changed=False) + + except boto.exception.BotoServerError, e: + module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) + def main(): argument_spec = ec2_argument_spec() argument_spec.update(dict( @@ -294,7 +356,8 @@ def main(): no_reboot = dict(default=False, type="bool"), state = dict(default='present'), device_mapping = dict(type='list'), - tags = dict(type='dict') + tags = dict(type='dict'), + launch_permissions = dict(type='dict') ) ) module = AnsibleModule(argument_spec=argument_spec) @@ -314,6 +377,10 @@ def main(): deregister_image(module, ec2) elif module.params.get('state') == 'present': + if module.params.get('image_id') and module.params.get('launch_permissions'): + # Update image's launch permissions + update_image(module, ec2) + # Changed is always set to true when provisioning new AMI if not module.params.get('instance_id'): module.fail_json(msg='instance_id parameter is required for new image') From c93456adc3d330a192591ef4f10d83163c10c32d Mon Sep 17 
00:00:00 2001 From: Bill Nottingham Date: Tue, 18 Aug 2015 10:14:59 -0400 Subject: [PATCH 03/86] win_msi: document extra_args The extra_args parameter was not documented. It's needed for installing some MSIs. --- windows/win_msi.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/windows/win_msi.py b/windows/win_msi.py index 01f09709f57..bd504879a83 100644 --- a/windows/win_msi.py +++ b/windows/win_msi.py @@ -34,6 +34,10 @@ options: description: - File system path to the MSI file to install required: true + extra_args: + description: + - Additional arguments to pass to the msiexec.exe command + required: false state: description: - Whether the MSI file should be installed or uninstalled From b04efa22c4403ca869e94e7918721306d23afa8d Mon Sep 17 00:00:00 2001 From: Sarah Haskins Date: Fri, 21 Aug 2015 11:33:28 -0400 Subject: [PATCH 04/86] Expose cache_parameter_group_name in elasticache module The cache_parameter_group_name was not previously exposed in elasticachy.py, I have exposed it, as optional. --- cloud/amazon/elasticache.py | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/cloud/amazon/elasticache.py b/cloud/amazon/elasticache.py index 31ed4696628..32756bb8c22 100644 --- a/cloud/amazon/elasticache.py +++ b/cloud/amazon/elasticache.py @@ -43,6 +43,11 @@ options: - The version number of the cache engine required: false default: none + cache_parameter_group_name: + description: + - The name of the cache parameter group to associate with this cache cluster. If this argument is omitted, the default cache parameter group for the specified engine will be used. 
+ required: false + default: none node_type: description: - The compute and memory capacity of the nodes in the cache cluster @@ -150,11 +155,12 @@ class ElastiCacheManager(object): def __init__(self, module, name, engine, cache_engine_version, node_type, num_nodes, cache_port, cache_subnet_group, cache_security_groups, security_group_ids, zone, wait, - hard_modify, region, **aws_connect_kwargs): + hard_modify, region, cache_parameter_group_name=None, **aws_connect_kwargs): self.module = module self.name = name self.engine = engine self.cache_engine_version = cache_engine_version + self.cache_parameter_group_name = cache_parameter_group_name self.node_type = node_type self.num_nodes = num_nodes self.cache_port = cache_port @@ -214,6 +220,7 @@ class ElastiCacheManager(object): cache_node_type=self.node_type, engine=self.engine, engine_version=self.cache_engine_version, + cache_parameter_group_name=self.cache_parameter_group_name, cache_security_group_names=self.cache_security_groups, security_group_ids=self.security_group_ids, cache_subnet_group_name=self.cache_subnet_group, @@ -293,7 +300,8 @@ class ElastiCacheManager(object): cache_security_group_names=self.cache_security_groups, security_group_ids=self.security_group_ids, apply_immediately=True, - engine_version=self.cache_engine_version) + engine_version=self.cache_engine_version, + cache_parameter_group_name=self.cache_parameter_group_name) except boto.exception.BotoServerError, e: self.module.fail_json(msg=e.message) @@ -478,6 +486,7 @@ def main(): name={'required': True}, engine={'required': False, 'default': 'memcached'}, cache_engine_version={'required': False}, + cache_parameter_group_name={'required': False}, node_type={'required': False, 'default': 'cache.m1.small'}, num_nodes={'required': False, 'default': None, 'type': 'int'}, cache_port={'required': False, 'type': 'int'}, @@ -505,6 +514,7 @@ def main(): state = module.params['state'] engine = module.params['engine'] cache_engine_version = 
module.params['cache_engine_version'] + cache_parameter_group_name = module.params['cache_parameter_group_name'] node_type = module.params['node_type'] num_nodes = module.params['num_nodes'] cache_port = module.params['cache_port'] @@ -530,12 +540,15 @@ def main(): module.fail_json(msg=str("Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set.")) elasticache_manager = ElastiCacheManager(module, name, engine, - cache_engine_version, node_type, + cache_engine_version, + node_type, num_nodes, cache_port, cache_subnet_group, cache_security_groups, security_group_ids, zone, wait, - hard_modify, region, **aws_connect_kwargs) + hard_modify, region, + cache_parameter_group_name=cache_parameter_group_name, + **aws_connect_kwargs) if state == 'present': elasticache_manager.ensure_present() From 8052d49b07e12fdaa3af5acea2d50eb93c1b5a83 Mon Sep 17 00:00:00 2001 From: Philippe Jandot Date: Wed, 16 Sep 2015 16:49:09 +0200 Subject: [PATCH 05/86] fix regression introduced by f38186ce8b49ea98e29241712da45917a3154e73, and propose a fix for docker facts --- cloud/docker/docker.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 0ab564208ba..a052fa388d0 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -1525,7 +1525,8 @@ def present(manager, containers, count, name): delta = count - len(containers.deployed) if delta > 0: - containers.notice_changed(manager.create_containers(delta)) + created = manager.create_containers(delta) + containers.notice_changed(manager.get_inspect_containers(created)) if delta < 0: # If both running and stopped containers exist, remove @@ -1540,8 +1541,8 @@ def present(manager, containers, count, name): to_remove.append(c) manager.stop_containers(to_stop) + containers.notice_changed(manager.get_inspect_containers(to_remove)) manager.remove_containers(to_remove) - 
containers.notice_changed(to_remove) def started(manager, containers, count, name): '''Ensure that exactly `count` matching containers exist and are running.''' @@ -1557,13 +1558,13 @@ def started(manager, containers, count, name): created = manager.create_containers(delta) manager.start_containers(created) - containers.notice_changed(created) + containers.notice_changed(manager.get_inspect_containers(created)) if delta < 0: excess = containers.running[0:-delta] + containers.notice_changed(manager.get_inspect_containers(excess)) manager.stop_containers(excess) manager.remove_containers(excess) - containers.notice_changed(excess) def reloaded(manager, containers, count, name): ''' @@ -1597,7 +1598,7 @@ def stopped(manager, containers, count, name): containers.refresh() manager.stop_containers(containers.running) - containers.notice_changed(containers.running) + containers.notice_changed(manager.get_inspect_containers(containers.running)) def killed(manager, containers, count, name): '''Kill any matching containers that are running.''' @@ -1605,7 +1606,7 @@ def killed(manager, containers, count, name): containers.refresh() manager.kill_containers(containers.running) - containers.notice_changed(containers.running) + containers.notice_changed(manager.get_inspect_containers(containers.running)) def absent(manager, containers, count, name): '''Stop and remove any matching containers.''' @@ -1613,8 +1614,8 @@ def absent(manager, containers, count, name): containers.refresh() manager.stop_containers(containers.running) + containers.notice_changed(manager.get_inspect_containers(containers.deployed)) manager.remove_containers(containers.deployed) - containers.notice_changed(containers.deployed) def main(): module = AnsibleModule( @@ -1727,9 +1728,8 @@ def main(): module.exit_json(changed=manager.has_changed(), msg=manager.get_summary_message(), summary=manager.counters, - containers=containers.changed, reload_reasons=manager.get_reload_reason_message(), - 
ansible_facts=_ansible_facts(manager.get_inspect_containers(containers.changed))) + ansible_facts=_ansible_facts(containers.changed)) except DockerAPIError as e: module.fail_json(changed=manager.has_changed(), msg="Docker API Error: %s" % e.explanation) From 5f914e854b6515515c0847ab2266ee9357853848 Mon Sep 17 00:00:00 2001 From: Gilad Peleg Date: Wed, 7 Oct 2015 14:49:05 +0300 Subject: [PATCH 06/86] Add state=running on some ec2 examples `state=running` was missing in some of the ec2 module examples --- cloud/amazon/ec2.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index 6572a9286f4..608a05fef54 100644 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -481,6 +481,7 @@ EXAMPLES = ''' # - ec2: + state: running key_name: mykey instance_type: c1.medium image: ami-40603AD1 @@ -498,6 +499,7 @@ EXAMPLES = ''' # - ec2: + state: running key_name: mykey instance_type: c1.medium image: ami-40603AD1 From 686ceb81d7bf5c2e569ffee2cc234dd14d68fd38 Mon Sep 17 00:00:00 2001 From: Adrian Bridgett Date: Thu, 8 Oct 2015 16:25:39 +0100 Subject: [PATCH 07/86] add idle_timeout support --- cloud/amazon/ec2_elb_lb.py | 32 ++++++++++++++++++++++++++++---- 1 file changed, 28 insertions(+), 4 deletions(-) diff --git a/cloud/amazon/ec2_elb_lb.py b/cloud/amazon/ec2_elb_lb.py index 37ba3fc1eb6..872ee2bedc7 100644 --- a/cloud/amazon/ec2_elb_lb.py +++ b/cloud/amazon/ec2_elb_lb.py @@ -102,6 +102,12 @@ options: required: false aliases: [] version_added: "1.8" + idle_timeout: + description: + - ELB connections from clients and to servers are timed out after this amount of time + required: false + aliases: [] + version_added: "2.0" cross_az_load_balancing: description: - Distribute load across all configured Availability Zones @@ -232,13 +238,14 @@ EXAMPLES = """ load_balancer_port: 80 instance_port: 80 -# Create an ELB with connection draining and cross availability +# Create an ELB with connection draining, increased idle timeout and cross 
availability # zone load balancing - local_action: module: ec2_elb_lb name: "New ELB" state: present connection_draining_timeout: 60 + idle_timeout: 300 cross_az_load_balancing: "yes" region: us-east-1 zones: @@ -305,7 +312,7 @@ class ElbManager(object): zones=None, purge_zones=None, security_group_ids=None, health_check=None, subnets=None, purge_subnets=None, scheme="internet-facing", connection_draining_timeout=None, - cross_az_load_balancing=None, + idle_timeout=None, cross_az_load_balancing=None, stickiness=None, region=None, **aws_connect_params): self.module = module @@ -320,6 +327,7 @@ class ElbManager(object): self.purge_subnets = purge_subnets self.scheme = scheme self.connection_draining_timeout = connection_draining_timeout + self.idle_timeout = idle_timeout self.cross_az_load_balancing = cross_az_load_balancing self.stickiness = stickiness @@ -347,6 +355,8 @@ class ElbManager(object): # set them to avoid errors if self._check_attribute_support('connection_draining'): self._set_connection_draining_timeout() + if self._check_attribute_support('connecting_settings'): + self._set_idle_timeout() if self._check_attribute_support('cross_zone_load_balancing'): self._set_cross_az_load_balancing() # add sitcky options @@ -442,6 +452,9 @@ class ElbManager(object): if self._check_attribute_support('connection_draining'): info['connection_draining_timeout'] = self.elb_conn.get_lb_attribute(self.name, 'ConnectionDraining').timeout + if self._check_attribute_support('connecting_settings'): + info['idle_timeout'] = self.elb_conn.get_lb_attribute(self.name, 'ConnectingSettings').idle_timeout + if self._check_attribute_support('cross_zone_load_balancing'): is_cross_az_lb_enabled = self.elb_conn.get_lb_attribute(self.name, 'CrossZoneLoadBalancing') if is_cross_az_lb_enabled: @@ -705,6 +718,12 @@ class ElbManager(object): attributes.connection_draining.enabled = False self.elb_conn.modify_lb_attribute(self.name, 'ConnectionDraining', attributes.connection_draining) + def 
_set_idle_timeout(self): + attributes = self.elb.get_attributes() + if self.idle_timeout is not None: + attributes.connecting_settings.idle_timeout = self.idle_timeout + self.elb_conn.modify_lb_attribute(self.name, 'ConnectingSettings', attributes.connecting_settings) + def _policy_name(self, policy_type): return __file__.split('/')[-1].replace('_', '-') + '-' + policy_type @@ -829,6 +848,7 @@ def main(): purge_subnets={'default': False, 'required': False, 'type': 'bool'}, scheme={'default': 'internet-facing', 'required': False}, connection_draining_timeout={'default': None, 'required': False}, + idle_timeout={'default': None, 'required': False}, cross_az_load_balancing={'default': None, 'required': False}, stickiness={'default': None, 'required': False, 'type': 'dict'} ) @@ -859,6 +879,7 @@ def main(): purge_subnets = module.params['purge_subnets'] scheme = module.params['scheme'] connection_draining_timeout = module.params['connection_draining_timeout'] + idle_timeout = module.params['idle_timeout'] cross_az_load_balancing = module.params['cross_az_load_balancing'] stickiness = module.params['stickiness'] @@ -886,8 +907,8 @@ def main(): elb_man = ElbManager(module, name, listeners, purge_listeners, zones, purge_zones, security_group_ids, health_check, subnets, purge_subnets, scheme, - connection_draining_timeout, cross_az_load_balancing, - stickiness, + connection_draining_timeout, idle_timeout, + cross_az_load_balancing, stickiness, region=region, **aws_connect_params) # check for unsupported attributes for this version of boto @@ -897,6 +918,9 @@ def main(): if connection_draining_timeout and not elb_man._check_attribute_support('connection_draining'): module.fail_json(msg="You must install boto >= 2.28.0 to use the connection_draining_timeout attribute") + if idle_timeout and not elb_man._check_attribute_support('connecting_settings'): + module.fail_json(msg="You must install boto >= 2.33.0 to use the idle_timeout attribute") + if state == 'present': 
elb_man.ensure_ok() elif state == 'absent': From cc821492d315d1cb92494edf93942017f61dfee8 Mon Sep 17 00:00:00 2001 From: Adrian Bridgett Date: Mon, 12 Oct 2015 17:45:37 +0100 Subject: [PATCH 08/86] remove unneeded aliases --- cloud/amazon/ec2_elb_lb.py | 1 - 1 file changed, 1 deletion(-) diff --git a/cloud/amazon/ec2_elb_lb.py b/cloud/amazon/ec2_elb_lb.py index 872ee2bedc7..9e6ef2ce51a 100644 --- a/cloud/amazon/ec2_elb_lb.py +++ b/cloud/amazon/ec2_elb_lb.py @@ -106,7 +106,6 @@ options: description: - ELB connections from clients and to servers are timed out after this amount of time required: false - aliases: [] version_added: "2.0" cross_az_load_balancing: description: From 6fc58855b931a29c2ec94efec1eacde2fd20a07d Mon Sep 17 00:00:00 2001 From: whiter Date: Thu, 15 Oct 2015 13:21:05 +1100 Subject: [PATCH 09/86] Allow iam_policy to maintain idempotentce if the role referenced has been removed --- cloud/amazon/iam_policy.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/cloud/amazon/iam_policy.py b/cloud/amazon/iam_policy.py index 0d2ed506457..eeab1a7acd5 100644 --- a/cloud/amazon/iam_policy.py +++ b/cloud/amazon/iam_policy.py @@ -183,6 +183,14 @@ def role_action(module, iam, name, policy_name, skip, pdoc, state): current_policies = [cp for cp in iam.list_role_policies(name). list_role_policies_result. policy_names] + except boto.exception.BotoServerError as e: + if e.error_code == "NoSuchEntity": + # Role doesn't exist so it's safe to assume the policy doesn't either + module.exit_json(changed=False) + else: + module.fail_json(e.message) + + try: for pol in current_policies: if urllib.unquote(iam.get_role_policy(name, pol). 
get_role_policy_result.policy_document) == pdoc: From 7b9c326ca635b06d617923caf67570c5ef027565 Mon Sep 17 00:00:00 2001 From: Evan Carter Date: Thu, 15 Oct 2015 15:06:32 -0400 Subject: [PATCH 10/86] add documentation stating that JSON files can be loaded with include_vars --- utilities/logic/include_vars.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utilities/logic/include_vars.py b/utilities/logic/include_vars.py index a6b2b5b152f..fb55ab5515f 100644 --- a/utilities/logic/include_vars.py +++ b/utilities/logic/include_vars.py @@ -14,7 +14,7 @@ author: "Benno Joy (@bennojoy)" module: include_vars short_description: Load variables from files, dynamically within a task. description: - - Loads variables from a YAML file dynamically during task runtime. It can work with conditionals, or use host specific variables to determine the path name to load from. + - Loads variables from a YAML/JSON file dynamically during task runtime. It can work with conditionals, or use host specific variables to determine the path name to load from. options: free-form: description: From bc4b40d8e788ca362b597c5e92670ca7ad9005c4 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 13 Oct 2015 10:04:50 -0400 Subject: [PATCH 11/86] added regex support to find, also added 'singular' aliasess to patterns and paths --- files/find.py | 26 +++++++++++++++----------- 1 file changed, 15 insertions(+), 11 deletions(-) diff --git a/files/find.py b/files/find.py index 659ec16026e..d7042f0027f 100644 --- a/files/find.py +++ b/files/find.py @@ -50,17 +50,18 @@ options: required: false default: '*' description: - - One or more (shell type) file glob patterns, which restrict the list of files to be returned to + - One or more (shell type) patterns, which restrict the list of files to be returned to those whose basenames match at least one of the patterns specified. Multiple patterns can be - specified using a list. + specified using a list. 
The patterns can be simple shell globs or a python regex prefixed by a '~'. + aliases: ['pattern'] contains: required: false default: null description: - - One or more re patterns which should be matched against the file content + - One or more re patterns which should be matched against the file content paths: required: true - aliases: [ "name" ] + aliases: [ "name", "path" ] description: - List of paths to the file or directory to search. All paths must be fully qualified. file_type: @@ -121,8 +122,9 @@ EXAMPLES = ''' # Recursively find /var/tmp files with last access time greater than 3600 seconds - find: paths="/var/tmp" age="3600" age_stamp=atime recurse=yes -# find /var/log files equal or greater than 10 megabytes ending with .log or .log.gz -- find: paths="/var/tmp" patterns="*.log","*.log.gz" size="10m" +# find /var/log files equal or greater than 10 megabytes ending with .old or .log.gz via regex +- find: paths="/var/tmp" patterns="~.*\.(?:old|log\.gz)$" size="10m" + ''' RETURN = ''' @@ -157,9 +159,11 @@ def pfilter(f, patterns=None): if patterns is None: return True for p in patterns: - if fnmatch.fnmatch(f, p): - return True - return False + if p.startswith('~'): + r = re.compile(p[1:]) + return r.match(f) + else: + return fnmatch.fnmatch(f, p) def agefilter(st, now, age, timestamp): @@ -236,8 +240,8 @@ def statinfo(st): def main(): module = AnsibleModule( argument_spec = dict( - paths = dict(required=True, aliases=['name'], type='list'), - patterns = dict(default=['*'], type='list'), + paths = dict(required=True, aliases=['name','path'], type='list'), + patterns = dict(default=['*'], type='list', aliases['pattern']), contains = dict(default=None, type='str'), file_type = dict(default="file", choices=['file', 'directory'], type='str'), age = dict(default=None, type='str'), From fdd88863d485b5003838459fb9574c5da6ee1598 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 13 Oct 2015 18:43:52 -0400 Subject: [PATCH 12/86] fixed bug in spec --- files/find.py 
| 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/find.py b/files/find.py index d7042f0027f..10d5de834f3 100644 --- a/files/find.py +++ b/files/find.py @@ -241,7 +241,7 @@ def main(): module = AnsibleModule( argument_spec = dict( paths = dict(required=True, aliases=['name','path'], type='list'), - patterns = dict(default=['*'], type='list', aliases['pattern']), + patterns = dict(default=['*'], type='list', aliases=['pattern']), contains = dict(default=None, type='str'), file_type = dict(default="file", choices=['file', 'directory'], type='str'), age = dict(default=None, type='str'), From e603b1bb693987c732ea116526d164993d2681db Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 19 Oct 2015 17:42:36 -0400 Subject: [PATCH 13/86] changed so regexes and shell globs work transparently --- files/find.py | 27 +++++++++++++++++++-------- 1 file changed, 19 insertions(+), 8 deletions(-) diff --git a/files/find.py b/files/find.py index 10d5de834f3..9ec6f6eb8b3 100644 --- a/files/find.py +++ b/files/find.py @@ -50,9 +50,9 @@ options: required: false default: '*' description: - - One or more (shell type) patterns, which restrict the list of files to be returned to + - One or more (shell or regex) patterns, which restrict the list of files to be returned to those whose basenames match at least one of the patterns specified. Multiple patterns can be - specified using a list. The patterns can be simple shell globs or a python regex prefixed by a '~'. + specified using a list. 
aliases: ['pattern'] contains: required: false @@ -123,7 +123,7 @@ EXAMPLES = ''' - find: paths="/var/tmp" age="3600" age_stamp=atime recurse=yes # find /var/log files equal or greater than 10 megabytes ending with .old or .log.gz via regex -- find: paths="/var/tmp" patterns="~.*\.(?:old|log\.gz)$" size="10m" +- find: paths="/var/tmp" patterns="^.*?\.(?:old|log\.gz)$" size="10m" ''' @@ -156,14 +156,25 @@ examined: def pfilter(f, patterns=None): '''filter using glob patterns''' + if patterns is None: return True + + match = False for p in patterns: - if p.startswith('~'): - r = re.compile(p[1:]) - return r.match(f) - else: - return fnmatch.fnmatch(f, p) + try: + r = re.compile(p) + match = r.match(f) + except: + pass + + if not match: + match = fnmatch.fnmatch(f, p) + + if match: + break + + return match def agefilter(st, now, age, timestamp): From 074aad23e7f39274c165247afeb15d996604e5cc Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 19 Oct 2015 20:43:50 -0400 Subject: [PATCH 14/86] final form, use_regex now controls if patterns is glob or regex - fixed cases in which stat fails (dangling symlink) - now properly reports name of skipped paths --- files/find.py | 57 +++++++++++++++++++++++++++++---------------------- 1 file changed, 33 insertions(+), 24 deletions(-) diff --git a/files/find.py b/files/find.py index 9ec6f6eb8b3..04ecddfe607 100644 --- a/files/find.py +++ b/files/find.py @@ -25,8 +25,6 @@ import stat import fnmatch import time import re -import shutil - DOCUMENTATION = ''' --- @@ -50,9 +48,9 @@ options: required: false default: '*' description: - - One or more (shell or regex) patterns, which restrict the list of files to be returned to - those whose basenames match at least one of the patterns specified. Multiple patterns can be - specified using a list. + - One or more (shell or regex) patterns, which type is controled by C(use_regex) option. 
+ - The patterns restrict the list of files to be returned to those whose basenames match at + least one of the patterns specified. Multiple patterns can be specified using a list. aliases: ['pattern'] contains: required: false @@ -109,6 +107,12 @@ options: choices: [ True, False ] description: - Set this to true to retrieve a file's sha1 checksum + use_regex: + required: false + default: "False" + choices: [ True, False ] + description: + - If false the patterns are file globs (shell) if true they are python regexes ''' @@ -122,9 +126,11 @@ EXAMPLES = ''' # Recursively find /var/tmp files with last access time greater than 3600 seconds - find: paths="/var/tmp" age="3600" age_stamp=atime recurse=yes -# find /var/log files equal or greater than 10 megabytes ending with .old or .log.gz via regex -- find: paths="/var/tmp" patterns="^.*?\.(?:old|log\.gz)$" size="10m" +# find /var/log files equal or greater than 10 megabytes ending with .old or .log.gz +- find: paths="/var/tmp" patterns="'*.old','*.log.gz'" size="10m" +# find /var/log files equal or greater than 10 megabytes ending with .old or .log.gz via regex +- find: paths="/var/tmp" patterns="^.*?\.(?:old|log\.gz)$" size="10m" use_regex=True ''' RETURN = ''' @@ -154,27 +160,24 @@ examined: sample: 34 ''' -def pfilter(f, patterns=None): +def pfilter(f, patterns=None, use_regex=False): '''filter using glob patterns''' if patterns is None: return True - match = False - for p in patterns: - try: + if use_regex: + for p in patterns: r = re.compile(p) - match = r.match(f) - except: - pass - - if not match: - match = fnmatch.fnmatch(f, p) + if r.match(f): + return True + else: - if match: - break + for p in patterns: + if fnmatch.fnmatch(f, p): + return True - return match + return False def agefilter(st, now, age, timestamp): @@ -262,6 +265,7 @@ def main(): hidden = dict(default="False", type='bool'), follow = dict(default="False", type='bool'), get_checksum = dict(default="False", type='bool'), + use_regex = 
dict(default="False", type='bool'), ), ) @@ -307,16 +311,21 @@ def main(): if os.path.basename(fsname).startswith('.') and not params['hidden']: continue - st = os.stat(fsname) + try: + st = os.stat(fsname) + except: + msg+="%s was skipped as it does not seem to be a valid file or it cannot be accessed\n" % fsname + continue + r = {'path': fsname} if stat.S_ISDIR(st.st_mode) and params['file_type'] == 'directory': - if pfilter(fsobj, params['patterns']) and agefilter(st, now, age, params['age_stamp']): + if pfilter(fsobj, params['patterns'], params['use_regex']) and agefilter(st, now, age, params['age_stamp']): r.update(statinfo(st)) filelist.append(r) elif stat.S_ISREG(st.st_mode) and params['file_type'] == 'file': - if pfilter(fsobj, params['patterns']) and \ + if pfilter(fsobj, params['patterns'], params['use_regex']) and \ agefilter(st, now, age, params['age_stamp']) and \ sizefilter(st, size) and \ contentfilter(fsname, params['contains']): @@ -329,7 +338,7 @@ def main(): if not params['recurse']: break else: - msg+="%s was skipped as it does not seem to be a valid directory or it cannot be accessed\n" + msg+="%s was skipped as it does not seem to be a valid directory or it cannot be accessed\n" % npath matched = len(filelist) module.exit_json(files=filelist, changed=False, msg=msg, matched=matched, examined=looked) From 2b2285eb675b94d452eb3a1416ef7e8ada3ac1bc Mon Sep 17 00:00:00 2001 From: Etherdaemon Date: Tue, 20 Oct 2015 16:38:27 +1000 Subject: [PATCH 15/86] fixes #12831 by updating the boto iam connection method to connect_to_region --- cloud/amazon/iam.py | 2 +- cloud/amazon/iam_cert.py | 3 ++- cloud/amazon/iam_policy.py | 2 +- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/cloud/amazon/iam.py b/cloud/amazon/iam.py index 8864cb10a6f..2d4b908e80e 100644 --- a/cloud/amazon/iam.py +++ b/cloud/amazon/iam.py @@ -565,7 +565,7 @@ def main(): region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module) try: - iam = 
boto.iam.connection.IAMConnection(**aws_connect_kwargs) + iam = boto.iam.connect_to_region(region, **aws_connect_kwargs) except boto.exception.NoAuthHandlerFound, e: module.fail_json(msg=str(e)) diff --git a/cloud/amazon/iam_cert.py b/cloud/amazon/iam_cert.py index 0c36abef322..2c36f921212 100644 --- a/cloud/amazon/iam_cert.py +++ b/cloud/amazon/iam_cert.py @@ -107,6 +107,7 @@ import sys try: import boto import boto.iam + import boto.ec2 HAS_BOTO = True except ImportError: HAS_BOTO = False @@ -246,7 +247,7 @@ def main(): region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module) try: - iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs) + iam = boto.iam.connect_to_region(region, **aws_connect_kwargs) except boto.exception.NoAuthHandlerFound, e: module.fail_json(msg=str(e)) diff --git a/cloud/amazon/iam_policy.py b/cloud/amazon/iam_policy.py index 0d2ed506457..b5242577ff4 100644 --- a/cloud/amazon/iam_policy.py +++ b/cloud/amazon/iam_policy.py @@ -307,7 +307,7 @@ def main(): region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module) try: - iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs) + iam = boto.iam.connect_to_region(region, **aws_connect_kwargs) except boto.exception.NoAuthHandlerFound, e: module.fail_json(msg=str(e)) From 735eefb2ca44419e368e67b45d40787927238f37 Mon Sep 17 00:00:00 2001 From: Kevin Falcone Date: Wed, 21 Oct 2015 16:43:50 -0400 Subject: [PATCH 16/86] Mark this as a string so it is rendered in the docs When this was treated as a boolean, sphinx was leaving the Default column on http://docs.ansible.com/ansible/ec2_module.html blank, implying it would use AWS's default. In reality, it passes False, which overrides the defaults at AWS (it's possible to boot an instance which AWS claims will always have EBS optimization without it because of this silently passed False). 
--- cloud/amazon/ec2.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py index ed36b855480..256c16decfd 100644 --- a/cloud/amazon/ec2.py +++ b/cloud/amazon/ec2.py @@ -225,7 +225,7 @@ options: description: - whether instance is using optimized EBS volumes, see U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html) required: false - default: false + default: 'false' exact_count: version_added: "1.5" description: From 08559670e5f2d8104cc94de3914ec039883766a5 Mon Sep 17 00:00:00 2001 From: Etherdaemon Date: Thu, 22 Oct 2015 09:12:58 +1000 Subject: [PATCH 17/86] Update try statement as pointed out by defionscode --- cloud/amazon/iam.py | 5 ++++- cloud/amazon/iam_cert.py | 5 ++++- cloud/amazon/iam_policy.py | 5 ++++- 3 files changed, 12 insertions(+), 3 deletions(-) diff --git a/cloud/amazon/iam.py b/cloud/amazon/iam.py index 2d4b908e80e..c1cd79f9a3f 100644 --- a/cloud/amazon/iam.py +++ b/cloud/amazon/iam.py @@ -565,7 +565,10 @@ def main(): region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module) try: - iam = boto.iam.connect_to_region(region, **aws_connect_kwargs) + if region: + iam = boto.iam.connect_to_region(region, **aws_connect_kwargs) + else: + iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs) except boto.exception.NoAuthHandlerFound, e: module.fail_json(msg=str(e)) diff --git a/cloud/amazon/iam_cert.py b/cloud/amazon/iam_cert.py index 2c36f921212..cc79d1cdc1c 100644 --- a/cloud/amazon/iam_cert.py +++ b/cloud/amazon/iam_cert.py @@ -247,7 +247,10 @@ def main(): region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module) try: - iam = boto.iam.connect_to_region(region, **aws_connect_kwargs) + if region: + iam = boto.iam.connect_to_region(region, **aws_connect_kwargs) + else: + iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs) except boto.exception.NoAuthHandlerFound, e: module.fail_json(msg=str(e)) diff --git a/cloud/amazon/iam_policy.py 
b/cloud/amazon/iam_policy.py index b5242577ff4..c17e1278ddc 100644 --- a/cloud/amazon/iam_policy.py +++ b/cloud/amazon/iam_policy.py @@ -307,7 +307,10 @@ def main(): region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module) try: - iam = boto.iam.connect_to_region(region, **aws_connect_kwargs) + if region: + iam = boto.iam.connect_to_region(region, **aws_connect_kwargs) + else: + iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs) except boto.exception.NoAuthHandlerFound, e: module.fail_json(msg=str(e)) From 5d73a9a4c5c36a4997a0019fed37bc5c0cd1fa3b Mon Sep 17 00:00:00 2001 From: David Shrewsbury Date: Thu, 22 Oct 2015 11:52:28 -0400 Subject: [PATCH 18/86] Fix for routers without external interfaces --- cloud/openstack/os_router.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/cloud/openstack/os_router.py b/cloud/openstack/os_router.py index e5eb9bdc987..d8d547f5f1f 100644 --- a/cloud/openstack/os_router.py +++ b/cloud/openstack/os_router.py @@ -164,10 +164,13 @@ def _needs_update(cloud, module, router, network, internal_subnet_ids): """ if router['admin_state_up'] != module.params['admin_state_up']: return True - if router['external_gateway_info'].get('enable_snat', True) != module.params['enable_snat']: - return True + if router['external_gateway_info']: + if router['external_gateway_info'].get('enable_snat', True) != module.params['enable_snat']: + return True if network: - if router['external_gateway_info']['network_id'] != network['id']: + if not router['external_gateway_info']: + return True + elif router['external_gateway_info']['network_id'] != network['id']: return True # check external interfaces From d73f5a4adbaac2591e2a00f4e17dbae854c73765 Mon Sep 17 00:00:00 2001 From: Lars Kellogg-Stedman Date: Thu, 22 Oct 2015 13:27:17 -0400 Subject: [PATCH 19/86] return information about created subnet make os_subnet behave like os_network in terms of returning information about the created resource. 
With this commit, os_subnet will return the created subnet in `subnet` and the subnet id in `id`. --- cloud/openstack/os_subnet.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cloud/openstack/os_subnet.py b/cloud/openstack/os_subnet.py index d54268f415a..1913d95ce7e 100644 --- a/cloud/openstack/os_subnet.py +++ b/cloud/openstack/os_subnet.py @@ -302,7 +302,9 @@ def main(): changed = True else: changed = False - module.exit_json(changed=changed) + module.exit_json(changed=changed, + subnet=subnet, + id=subnet['id']) elif state == 'absent': if not subnet: From c54c5c8234bcf47ffb0cd39397ba2ee01d8e39e5 Mon Sep 17 00:00:00 2001 From: Lars Kellogg-Stedman Date: Thu, 22 Oct 2015 13:38:32 -0400 Subject: [PATCH 20/86] make os_network correctly report changed status The `os_network` module was incorrectly returning changed=False whether or not the network was created. This commit makes the changed return value useful. --- cloud/openstack/os_network.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/cloud/openstack/os_network.py b/cloud/openstack/os_network.py index bc41d3870f4..82863ebd6b5 100644 --- a/cloud/openstack/os_network.py +++ b/cloud/openstack/os_network.py @@ -146,7 +146,10 @@ def main(): if state == 'present': if not net: net = cloud.create_network(name, shared, admin_state_up, external) - module.exit_json(changed=False, network=net, id=net['id']) + changed = True + else: + changed = False + module.exit_json(changed=changed, network=net, id=net['id']) elif state == 'absent': if not net: From 8368da4297b8aab19cfa82080163a6d464f3449c Mon Sep 17 00:00:00 2001 From: Lars Kellogg-Stedman Date: Thu, 22 Oct 2015 13:56:56 -0400 Subject: [PATCH 21/86] make os_flavor return a top-level 'id' key make os_flavor return a top-level 'id' key, much like other os_* resources. 
--- cloud/openstack/os_nova_flavor.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/cloud/openstack/os_nova_flavor.py b/cloud/openstack/os_nova_flavor.py index 82b3a53aa3d..f7924030461 100644 --- a/cloud/openstack/os_nova_flavor.py +++ b/cloud/openstack/os_nova_flavor.py @@ -217,8 +217,13 @@ def main(): rxtx_factor=module.params['rxtx_factor'], is_public=module.params['is_public'] ) - module.exit_json(changed=True, flavor=flavor) - module.exit_json(changed=False, flavor=flavor) + changed=True + else: + changed=False + + module.exit_json(changed=changed, + flavor=flavor, + id=flavor['id']) elif state == 'absent': if flavor: From 377811dac2cc69f1d51fcf530b3087ccfb8b70ea Mon Sep 17 00:00:00 2001 From: Lars Kellogg-Stedman Date: Thu, 22 Oct 2015 13:59:45 -0400 Subject: [PATCH 22/86] make os_keypair return a top level 'id' key make os_keypair return a top-level 'id' key, much like other os_* resources. --- cloud/openstack/os_keypair.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/cloud/openstack/os_keypair.py b/cloud/openstack/os_keypair.py index f62cc51bf64..64ebd8c67be 100644 --- a/cloud/openstack/os_keypair.py +++ b/cloud/openstack/os_keypair.py @@ -146,10 +146,14 @@ def main(): " as offered. Delete key first." % name ) else: - module.exit_json(changed=False, key=keypair) - - new_key = cloud.create_keypair(name, public_key) - module.exit_json(changed=True, key=new_key) + changed = False + else: + keypair = cloud.create_keypair(name, public_key) + changed = True + + module.exit_json(changed=changed, + key=keypair, + id=keypair['id']) elif state == 'absent': if keypair: From e65c0f896a3938738e861325f39b87d6e1114442 Mon Sep 17 00:00:00 2001 From: Simon Hafner Date: Thu, 22 Oct 2015 22:26:51 +0200 Subject: [PATCH 23/86] simpler way to check if systemd is the init system According to http://www.freedesktop.org/software/systemd/man/sd_booted.html check if the directory /run/systemd/system/ exists. 
--- system/service.py | 17 +---------------- 1 file changed, 1 insertion(+), 16 deletions(-) diff --git a/system/service.py b/system/service.py index f9a8b1e24c1..227d34c1463 100644 --- a/system/service.py +++ b/system/service.py @@ -403,22 +403,7 @@ class LinuxService(Service): self.svc_initscript = initscript def check_systemd(): - # verify systemd is installed (by finding systemctl) - if not location.get('systemctl', False): - return False - - # Check if init is the systemd command, using comm as cmdline could be symlink - try: - f = open('/proc/1/comm', 'r') - except IOError, err: - # If comm doesn't exist, old kernel, no systemd - return False - - for line in f: - if 'systemd' in line: - return True - - return False + return os.path.exists("/run/systemd/system/") # Locate a tool to enable/disable a service if location.get('systemctl',False) and check_systemd(): From 32a21a9b61028f8a6bca4345fbff548ba115b6ff Mon Sep 17 00:00:00 2001 From: chriskarel Date: Thu, 22 Oct 2015 16:49:34 -0500 Subject: [PATCH 24/86] Update hostname.py Added support for Oracle Linux. 
(http://www.oracle.com/linux) --- system/hostname.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/system/hostname.py b/system/hostname.py index 9e7f6a4ef70..2914088691a 100644 --- a/system/hostname.py +++ b/system/hostname.py @@ -481,6 +481,15 @@ class ScientificLinuxHostname(Hostname): else: strategy_class = RedHatStrategy +class OracleLinuxHostname(Hostname): + platform = 'Linux' + distribution = 'Oracle linux server' + distribution_version = get_distribution_version() + if distribution_version and LooseVersion(distribution_version) >= LooseVersion("7"): + strategy_class = SystemdStrategy + else: + strategy_class = RedHatStrategy + class AmazonLinuxHostname(Hostname): platform = 'Linux' distribution = 'Amazon' From da0fbfc564087f8c05e698d1953a7f9c0dbeab10 Mon Sep 17 00:00:00 2001 From: Lars Kellogg-Stedman Date: Thu, 22 Oct 2015 15:22:50 -0400 Subject: [PATCH 25/86] allow empty description attribute for os_security_group The `os_security_group` module would fail if there was no `description:` attribute: localhost | FAILED! => { "changed": false, "failed": true, "msg": "Error creating security group larstest: Invalid input for description. Reason: 'None' is not a valid string." } This commit makes the default description `''` rather than `None`. 
--- cloud/openstack/os_security_group.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/openstack/os_security_group.py b/cloud/openstack/os_security_group.py index e42b7f938f5..fd839755144 100644 --- a/cloud/openstack/os_security_group.py +++ b/cloud/openstack/os_security_group.py @@ -91,7 +91,7 @@ def _system_state_change(module, secgroup): def main(): argument_spec = openstack_full_argument_spec( name=dict(required=True), - description=dict(default=None), + description=dict(default=''), state=dict(default='present', choices=['absent', 'present']), ) From fb10161510996949e990e2ea7b84cc98fc9989df Mon Sep 17 00:00:00 2001 From: Ryan Sydnor Date: Tue, 13 Oct 2015 21:49:54 -0400 Subject: [PATCH 26/86] Add capability for stat module to use more hash algorithms Specifically, the stat module now has a checksum_algorithm parameter. This lets the module utilize one of the hash algorithms available on the host to return the checksum of the file. This change is backwards compatible. The checksum_algorithm defaults to sha1 and still returns its result to the stat.checksum property. --- files/stat.py | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/files/stat.py b/files/stat.py index 8f2bd289bc0..61c77a2ef31 100644 --- a/files/stat.py +++ b/files/stat.py @@ -42,11 +42,18 @@ options: aliases: [] get_checksum: description: - - Whether to return a checksum of the file (currently sha1) + - Whether to return a checksum of the file (default sha1) required: false default: yes aliases: [] version_added: "1.8" + checksum_algorithm: + description: + - Algorithm to determine checksum of file. Will throw an error if the host is unable to use specified algorithm. 
+ required: false + choices: [ 'sha1', 'sha224', 'sha256', 'sha384', 'sha512' ] + default: sha1 + version_added: "2.0" author: "Bruce Pennypacker (@bpennypacker)" ''' @@ -84,6 +91,9 @@ EXAMPLES = ''' # Don't do md5 checksum - stat: path=/path/to/myhugefile get_md5=no + +# Use sha256 to calculate checksum +- stat: path=/path/to/something checksum_algorithm=sha256 ''' RETURN = ''' @@ -254,7 +264,7 @@ stat: sample: f88fa92d8cf2eeecf4c0a50ccc96d0c0 checksum: description: hash of the path - returned: success, path exists and user can read stats and path supports hashing + returned: success, path exists, user can read stats, path supports hashing and supplied checksum algorithm is available type: string sample: 50ba294cdf28c0d5bcde25708df53346825a429f pw_name: @@ -281,7 +291,8 @@ def main(): path = dict(required=True), follow = dict(default='no', type='bool'), get_md5 = dict(default='yes', type='bool'), - get_checksum = dict(default='yes', type='bool') + get_checksum = dict(default='yes', type='bool'), + checksum_algorithm = dict(default='sha1', type='str', choices=['sha1', 'sha224', 'sha256', 'sha384', 'sha512']) ), supports_check_mode = True ) @@ -291,6 +302,7 @@ def main(): follow = module.params.get('follow') get_md5 = module.params.get('get_md5') get_checksum = module.params.get('get_checksum') + checksum_algorithm = module.params.get('checksum_algorithm') try: if follow: @@ -351,8 +363,7 @@ def main(): d['md5'] = None if S_ISREG(mode) and get_checksum and os.access(path,os.R_OK): - d['checksum'] = module.sha1(path) - + d['checksum'] = module.digest_from_file(path, checksum_algorithm) try: pw = pwd.getpwuid(st.st_uid) @@ -370,4 +381,4 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() +main() \ No newline at end of file From b0c10a7d31f0e632b3d115493d0a718291e19291 Mon Sep 17 00:00:00 2001 From: Lars Kellogg-Stedman Date: Thu, 22 Oct 2015 21:41:17 -0400 Subject: [PATCH 27/86] allow os_server to accept a list of security groups 
This commit allows the `security_groups` parameter of the `os_server` module to be either a YAML list or a comma-delimited string (much like the `nics` attribute). E.g., this: - os_nova_server: [...] security_groups: - default - webserver Or this: - os_nova_server: [...] security_groups: default,webserver --- cloud/openstack/os_server.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/cloud/openstack/os_server.py b/cloud/openstack/os_server.py index aa06899213f..189840e2498 100644 --- a/cloud/openstack/os_server.py +++ b/cloud/openstack/os_server.py @@ -76,7 +76,8 @@ options: default: None security_groups: description: - - The name of the security group to which the instance should be added + - Names of the security groups to which the instance should be + added. This may be a YAML list or a common separated string. required: false default: None nics: @@ -366,7 +367,7 @@ def _create_server(module, cloud): flavor=flavor_dict['id'], nics=nics, meta=module.params['meta'], - security_groups=module.params['security_groups'].split(','), + security_groups=module.params['security_groups'], userdata=module.params['userdata'], config_drive=module.params['config_drive'], ) @@ -459,7 +460,7 @@ def main(): flavor_ram = dict(default=None, type='int'), flavor_include = dict(default=None), key_name = dict(default=None), - security_groups = dict(default='default'), + security_groups = dict(default=['default'], type='list'), nics = dict(default=[], type='list'), meta = dict(default=None), userdata = dict(default=None), From a2fe8dba68ae83903c2b84d5099f57af5451a17e Mon Sep 17 00:00:00 2001 From: Lars Kellogg-Stedman Date: Fri, 23 Oct 2015 09:44:07 -0400 Subject: [PATCH 28/86] allow os_port to accept a list of security groups with this commit, the `security_groups` attribute for `os_port` will accept either a comma-delimited string or a YAML list. That is, either this: - os_port: [...] security_groups: group1,group2 Or this: - os_port: [...]
security_groups: - group1 - group2 --- cloud/openstack/os_port.py | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/cloud/openstack/os_port.py b/cloud/openstack/os_port.py index 2ee1ab568a6..d218e938b10 100644 --- a/cloud/openstack/os_port.py +++ b/cloud/openstack/os_port.py @@ -61,8 +61,7 @@ options: security_groups: description: - Security group(s) ID(s) or name(s) associated with the port (comma - separated for multiple security groups - no spaces between comma(s) - or YAML list). + separated string or YAML list) required: false default: None no_security_groups: @@ -220,7 +219,7 @@ def _needs_update(module, port, cloud): 'device_id'] compare_dict = ['allowed_address_pairs', 'extra_dhcp_opt'] - compare_comma_separated_list = ['security_groups'] + compare_list = ['security_groups'] for key in compare_simple: if module.params[key] is not None and module.params[key] != port[key]: @@ -229,7 +228,7 @@ def _needs_update(module, port, cloud): if module.params[key] is not None and cmp(module.params[key], port[key]) != 0: return True - for key in compare_comma_separated_list: + for key in compare_list: if module.params[key] is not None and (set(module.params[key]) != set(port[key])): return True @@ -309,7 +308,7 @@ def main(): fixed_ips=dict(default=None), admin_state_up=dict(default=None), mac_address=dict(default=None), - security_groups=dict(default=None), + security_groups=dict(default=None, type='list'), no_security_groups=dict(default=False, type='bool'), allowed_address_pairs=dict(default=None), extra_dhcp_opt=dict(default=None), @@ -336,13 +335,11 @@ def main(): try: cloud = shade.openstack_cloud(**module.params) if module.params['security_groups']: - if type(module.params['security_groups']) == str: - module.params['security_groups'] = module.params[ - 'security_groups'].split(',') # translate security_groups to UUID's if names where provided - module.params['security_groups'] = map( - lambda v: get_security_group_id(module, 
cloud, v), - module.params['security_groups']) + module.params['security_groups'] = [ + get_security_group_id(module, cloud, v) + for v in module.params['security_groups'] + ] port = None network_id = None From 5c2527d8e95c86d9c05deefd05c44428d2b9b2f4 Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 23 Oct 2015 13:32:32 -0400 Subject: [PATCH 29/86] Version bump for new beta 2.0.0-0.4.beta2 --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index 8b31b2b4fdb..f802f1a2cdb 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.0.0-0.3.beta1 +2.0.0-0.4.beta2 From d82460a3728b9208a380ddb9698193722cb4eec8 Mon Sep 17 00:00:00 2001 From: Lars Kellogg-Stedman Date: Fri, 23 Oct 2015 13:32:37 -0400 Subject: [PATCH 30/86] make os_router return a top level 'id' key make os_router return a top-level 'id' key, much like other os_* resources. --- cloud/openstack/os_router.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cloud/openstack/os_router.py b/cloud/openstack/os_router.py index d8d547f5f1f..d48ed0417f1 100644 --- a/cloud/openstack/os_router.py +++ b/cloud/openstack/os_router.py @@ -335,7 +335,9 @@ def main(): changed = True - module.exit_json(changed=changed, router=router) + module.exit_json(changed=changed, + router=router, + id=router['id']) elif state == 'absent': if not router: From 6233e72001c4c8d785b7e82123ee6fb4c8c93a6e Mon Sep 17 00:00:00 2001 From: Simon Hafner Date: Sat, 24 Oct 2015 00:18:43 +0200 Subject: [PATCH 31/86] added earlier paths to systemd https://github.com/systemd/systemd/commit/2b583ce6576d4a074ce6f1570b3e60b65c64ae7d https://github.com/systemd/systemd/commit/b925e72633bf98438f56a140520e07ec8c959e46 --- system/service.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/service.py b/system/service.py index 227d34c1463..d08b6cd6746 100644 --- a/system/service.py +++ b/system/service.py @@ -403,7 +403,7 @@ class LinuxService(Service): self.svc_initscript = 
initscript def check_systemd(): - return os.path.exists("/run/systemd/system/") + return os.path.exists("/run/systemd/system/") or os.path.exists("/dev/.run/systemd/") or os.path.exists("/dev/.systemd/") # Locate a tool to enable/disable a service if location.get('systemctl',False) and check_systemd(): From 4072bc1da0010750d7d7dee32a9bd00d5222cc6e Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Fri, 23 Oct 2015 18:59:05 -0400 Subject: [PATCH 32/86] rearranged systemd check, removed redundant systemctl check fixed unused cmd and state var assignements --- system/service.py | 31 ++++++++++++++++++++++++++----- 1 file changed, 26 insertions(+), 5 deletions(-) diff --git a/system/service.py b/system/service.py index d08b6cd6746..2b8dbb8696c 100644 --- a/system/service.py +++ b/system/service.py @@ -395,7 +395,7 @@ class LinuxService(Service): location = dict() for binary in binaries: - location[binary] = self.module.get_bin_path(binary) + location[binary] = self.module.get_bin_path(binary, opt_dirs=paths) for initdir in initpaths: initscript = "%s/%s" % (initdir,self.name) @@ -403,10 +403,31 @@ class LinuxService(Service): self.svc_initscript = initscript def check_systemd(): - return os.path.exists("/run/systemd/system/") or os.path.exists("/dev/.run/systemd/") or os.path.exists("/dev/.systemd/") + + # tools must be installed + if location.get('systemctl',False): + + # this should show if systemd is the boot init system + # these mirror systemd's own sd_boot test http://www.freedesktop.org/software/systemd/man/sd_booted.html + for canary in ["/run/systemd/system/", "/dev/.run/systemd/", "/dev/.systemd/"]: + if os.path.exists(canary): + return True + + # If all else fails, check if init is the systemd command, using comm as cmdline could be symlink + try: + f = open('/proc/1/comm', 'r') + except IOError: + # If comm doesn't exist, old kernel, no systemd + return False + + for line in f: + if 'systemd' in line: + return True + + return False # Locate a tool to 
enable/disable a service - if location.get('systemctl',False) and check_systemd(): + if check_systemd(): # service is managed by systemd self.__systemd_unit = self.name self.svc_cmd = location['systemctl'] @@ -684,7 +705,8 @@ class LinuxService(Service): (rc, out, err) = self.execute_command("%s --list %s" % (self.enable_cmd, self.name)) if not self.name in out: self.module.fail_json(msg="service %s does not support chkconfig" % self.name) - state = out.split()[-1] + #TODO: look back on why this is here + #state = out.split()[-1] # Check if we're already in the correct state if "3:%s" % action in out and "5:%s" % action in out: @@ -946,7 +968,6 @@ class FreeBsdService(Service): self.rcconf_file = rcfile rc, stdout, stderr = self.execute_command("%s %s %s %s" % (self.svc_cmd, self.name, 'rcvar', self.arguments)) - cmd = "%s %s %s %s" % (self.svc_cmd, self.name, 'rcvar', self.arguments) try: rcvars = shlex.split(stdout, comments=True) except: From 06f301b05b384c0a8e81b92c5c2333b2e66e2767 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 26 Oct 2015 08:36:30 -0700 Subject: [PATCH 33/86] Note the difference between yum package groups and environment groups. Fixes https://github.com/ansible/ansible/issues/12873 --- packaging/os/yum.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/packaging/os/yum.py b/packaging/os/yum.py index bcf9b283a95..1c8f0c2e127 100644 --- a/packaging/os/yum.py +++ b/packaging/os/yum.py @@ -130,6 +130,15 @@ notes: that the other packages come from (such as epel-release) then that package needs to be installed in a separate task. This mimics yum's command line behaviour. + - 'Yum itself has two types of groups. "Package groups" are specified in the + rpm itself while "environment groups" are specified in a separate file + (usually by the distribution). 
Unfortunately, this division becomes + apparent to ansible users because ansible needs to operate on the group + of packages in a single transaction and yum requires groups to be specified + in different ways when used in that way. Package groups are specified as + "@development-tools" and environment groups are "@^gnome-desktop-environment'. + Use the "yum group list" command to see which category of group the group + you want to install falls into.' # informational: requirements for nodes requirements: [ yum ] author: @@ -161,6 +170,9 @@ EXAMPLES = ''' - name: install the 'Development tools' package group yum: name="@Development tools" state=present + +- name: install the 'Gnome desktop' environment group + yum: name="@^gnome-desktop-environment" state=present ''' # 64k. Number of bytes to read at a time when manually downloading pkgs via a url From 2a93f218216a740d705552d62edc898a276ef507 Mon Sep 17 00:00:00 2001 From: Patrick Galbraith Date: Mon, 26 Oct 2015 13:28:10 -0400 Subject: [PATCH 34/86] Fix to issue 12912. Supply 'force' to install of python-apt. 
--- packaging/os/apt.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/os/apt.py b/packaging/os/apt.py index 1fd770f710e..d99eb85ff7e 100755 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -559,7 +559,7 @@ def main(): if not HAS_PYTHON_APT: try: - module.run_command('apt-get update && apt-get install python-apt -y -q', use_unsafe_shell=True, check_rc=True) + module.run_command('apt-get update && apt-get install python-apt -y -q --force-yes', use_unsafe_shell=True, check_rc=True) global apt, apt_pkg import apt import apt.debfile From 3993f4e9674ad3d325aed1c3ca43f5e2f81c9b9c Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 26 Oct 2015 13:01:01 -0700 Subject: [PATCH 35/86] Simplify logic to handle options set to empty string Fixes #2125 --- files/ini_file.py | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/files/ini_file.py b/files/ini_file.py index fff153af6ad..cb3edb2cff2 100644 --- a/files/ini_file.py +++ b/files/ini_file.py @@ -65,6 +65,12 @@ options: description: - all arguments accepted by the M(file) module also work here required: false + state: + description: + - If set to C(absent) the option or section will be removed if present instead of created. + required: false + default: "present" + choices: [ "present", "absent" ] notes: - While it is possible to add an I(option) without specifying a I(value), this makes no sense. 
@@ -110,21 +116,14 @@ def do_ini(module, filename, section=None, option=None, value=None, state='prese if state == 'absent': - if option is None and value is None: - if cp.has_section(section): - cp.remove_section(section) - changed = True + if option is None: + changed = cp.remove_section(section) else: - if option is not None: - try: - if cp.get(section, option): - cp.remove_option(section, option) - changed = True - except ConfigParser.InterpolationError: - cp.remove_option(section, option) - changed = True - except: - pass + try: + changed = cp.remove_option(section, option) + except ConfigParser.NoSectionError: + # Option isn't present if the section isn't either + pass if state == 'present': @@ -212,4 +211,5 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() From e101657722fa6457a97141858449b870e269f6af Mon Sep 17 00:00:00 2001 From: Jonathan Mainguy Date: Tue, 27 Oct 2015 13:03:51 -0400 Subject: [PATCH 36/86] Update error message to be more explicit --- database/mysql/mysql_variables.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/database/mysql/mysql_variables.py b/database/mysql/mysql_variables.py index d7187e85733..ab4848d6938 100644 --- a/database/mysql/mysql_variables.py +++ b/database/mysql/mysql_variables.py @@ -244,7 +244,8 @@ def main(): db_connection = MySQLdb.connect(host=module.params["login_host"], port=module.params["login_port"], user=login_user, passwd=login_password, db="mysql") cursor = db_connection.cursor() except Exception, e: - module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or ~/.my.cnf has the credentials") + errno, errstr = e.args + module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or ~/.my.cnf has the credentials (ERROR: %s %s)" % (errno, errstr)) mysqlvar_val = getvariable(cursor, mysqlvar) if mysqlvar_val is None: 
module.fail_json(msg="Variable not available \"%s\"" % mysqlvar, changed=False) From 51db236aa7556f68d442386a6ea8f2938dcfc5a6 Mon Sep 17 00:00:00 2001 From: Jonathan Mainguy Date: Tue, 27 Oct 2015 13:17:24 -0400 Subject: [PATCH 37/86] Update doc to reflect password is required if adding a new user --- database/mysql/mysql_user.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/database/mysql/mysql_user.py b/database/mysql/mysql_user.py index 1ea54b41b3a..3ac7c0890cd 100644 --- a/database/mysql/mysql_user.py +++ b/database/mysql/mysql_user.py @@ -32,7 +32,7 @@ options: required: true password: description: - - set the user's password + - set the user's password. (Required when adding a user) required: false default: null host: From eeaeeb5a1ffe81e61197791a4ab3b5e2ac2d1f07 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 27 Oct 2015 12:51:48 -0700 Subject: [PATCH 38/86] Correct typo in yum module docs --- packaging/os/yum.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/os/yum.py b/packaging/os/yum.py index 1c8f0c2e127..e1e3341a075 100644 --- a/packaging/os/yum.py +++ b/packaging/os/yum.py @@ -136,7 +136,7 @@ notes: apparent to ansible users because ansible needs to operate on the group of packages in a single transaction and yum requires groups to be specified in different ways when used in that way. Package groups are specified as - "@development-tools" and environment groups are "@^gnome-desktop-environment'. + "@development-tools" and environment groups are "@^gnome-desktop-environment". Use the "yum group list" command to see which category of group the group you want to install falls into.' 
# informational: requirements for nodes From 45a9f0b4536b30dd8e796e7c02ba0510fc3ca008 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Wed, 28 Oct 2015 09:31:18 -0400 Subject: [PATCH 39/86] since find doesn't make changes, support check mode and gather data for other tasks in check mode --- files/find.py | 1 + 1 file changed, 1 insertion(+) diff --git a/files/find.py b/files/find.py index 04ecddfe607..d6d1d42c4fd 100644 --- a/files/find.py +++ b/files/find.py @@ -267,6 +267,7 @@ def main(): get_checksum = dict(default="False", type='bool'), use_regex = dict(default="False", type='bool'), ), + supports_check_mode=True, ) params = module.params From 22c2789b72c6ed8fa0735fd0aef81858372e1b8e Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Wed, 28 Oct 2015 08:50:47 -0700 Subject: [PATCH 40/86] Document and return an error if httplib2 >= 0.7 is not present. We can't use httplib2 0.6.x and below because they do not verify TLS certificates and thus are insecure. Fixes #1875 --- network/basics/uri.py | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/network/basics/uri.py b/network/basics/uri.py index 1b3ace2eccd..5c0907523b8 100644 --- a/network/basics/uri.py +++ b/network/basics/uri.py @@ -25,6 +25,8 @@ import shutil import tempfile import base64 import datetime +from distutils.version import LooseVersion + try: import json except ImportError: @@ -143,7 +145,8 @@ options: version_added: '1.9.2' # informational: requirements for nodes -requirements: [ urlparse, httplib2 ] +requirements: + - httplib2 >= 0.7.0 author: "Romeo Theriault (@romeotheriault)" ''' @@ -198,11 +201,15 @@ EXAMPLES = ''' ''' -HAS_HTTPLIB2 = True +HAS_HTTPLIB2 = False + try: import httplib2 -except ImportError: - HAS_HTTPLIB2 = False + if LooseVersion(httplib2.__version__) >= LooseVersion('0.7'): + HAS_HTTPLIB2 = True +except ImportError, AttributeError: + # AttributeError if __version__ is not present + pass HAS_URLPARSE = True @@ -382,7 +389,7 @@ def main(): ) if 
not HAS_HTTPLIB2: - module.fail_json(msg="httplib2 is not installed") + module.fail_json(msg="httplib2 >= 0.7 is not installed") if not HAS_URLPARSE: module.fail_json(msg="urlparse is not installed") From 43cecd3ceede3102306d56a05d0f93a25917f3ff Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Tue, 27 Oct 2015 17:26:51 -0700 Subject: [PATCH 41/86] Use select in wait_for so that we don't get stuck in cornercases: * reading from a socket that gave some data we weren't looking for and then closed. * read from a socket that stays open and never sends data. * reading from a socket that sends data but not the data we're looking for. Fixes #2051 --- utilities/logic/wait_for.py | 100 ++++++++++++++++++++++-------------- 1 file changed, 62 insertions(+), 38 deletions(-) diff --git a/utilities/logic/wait_for.py b/utilities/logic/wait_for.py index 295155f3028..1287d9b6057 100644 --- a/utilities/logic/wait_for.py +++ b/utilities/logic/wait_for.py @@ -18,12 +18,14 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . 
-import socket +import binascii import datetime -import time -import sys +import math import re -import binascii +import select +import socket +import sys +import time HAS_PSUTIL = False try: @@ -349,6 +351,10 @@ def main(): state = params['state'] path = params['path'] search_regex = params['search_regex'] + if search_regex is not None: + compiled_search_re = re.compile(search_regex, re.MULTILINE) + else: + compiled_search_re = None if port and path: module.fail_json(msg="port and path parameter can not both be passed to wait_for") @@ -404,55 +410,72 @@ def main(): if path: try: os.stat(path) - if search_regex: - try: - f = open(path) - try: - if re.search(search_regex, f.read(), re.MULTILINE): - break - else: - time.sleep(1) - finally: - f.close() - except IOError: - time.sleep(1) - pass - else: - break except OSError, e: - # File not present - if e.errno == 2: - time.sleep(1) - else: + # If anything except file not present, throw an error + if e.errno != 2: elapsed = datetime.datetime.now() - start module.fail_json(msg="Failed to stat %s, %s" % (path, e.strerror), elapsed=elapsed.seconds) + # file doesn't exist yet, so continue + else: + # File exists. Are there additional things to check? + if not compiled_search_re: + # nope, succeed! + break + try: + f = open(path) + try: + if re.search(compiled_search_re, f.read()): + # String found, success! + break + finally: + f.close() + except IOError: + pass elif port: + alt_connect_timeout = math.ceil((end - datetime.datetime.now()).total_seconds()) try: - s = _create_connection( (host, port), connect_timeout) - if search_regex: + s = _create_connection((host, port), min(connect_timeout, alt_connect_timeout)) + except: + # Failed to connect by connect_timeout. wait and try again + pass + else: + # Connected -- are there additional conditions? 
+ if compiled_search_re: data = '' matched = False - while 1: - data += s.recv(1024) - if not data: + while datetime.datetime.now() < end: + max_timeout = math.ceil((end - datetime.datetime.now()).total_seconds()) + (readable, w, e) = select.select([s], [], [], max_timeout) + if not readable: + # No new data. Probably means our timeout + # expired + continue + response = s.recv(1024) + if not response: + # Server shutdown break - elif re.search(search_regex, data, re.MULTILINE): + data += response + if re.search(compiled_search_re, data): matched = True break + + # Shutdown the client socket + s.shutdown(socket.SHUT_RDWR) + s.close() if matched: - s.shutdown(socket.SHUT_RDWR) - s.close() + # Found our string, success! break else: + # Connection established, success! s.shutdown(socket.SHUT_RDWR) s.close() break - except: - time.sleep(1) - pass - else: - time.sleep(1) - else: + + # Conditions not yet met, wait and try again + time.sleep(1) + + else: # while-else + # Timeout expired elapsed = datetime.datetime.now() - start if port: if search_regex: @@ -485,4 +508,5 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() From e70002d2f9ec4c2958573463c42bfe8c332bb72b Mon Sep 17 00:00:00 2001 From: Lars Kellogg-Stedman Date: Wed, 28 Oct 2015 13:16:25 -0400 Subject: [PATCH 42/86] os_server_facts returns facts about multiple servers have `os_server_facts` call `list_servers` rather than `get_server`, and treat the `server` parameter as a wildcard pattern. This permits one to get facts on a single server: - os_server: server: webserver1 On mutiple servers: - os_server: server: webserver* Or on all servers: - os_server: Introduces a `detailed` parameter to request additional server details at the cost of additional API calls. 
--- cloud/openstack/os_server_facts.py | 48 ++++++++++++++++++++---------- 1 file changed, 33 insertions(+), 15 deletions(-) diff --git a/cloud/openstack/os_server_facts.py b/cloud/openstack/os_server_facts.py index 5d61e4c18d3..bd694dbc558 100644 --- a/cloud/openstack/os_server_facts.py +++ b/cloud/openstack/os_server_facts.py @@ -15,6 +15,8 @@ # You should have received a copy of the GNU General Public License # along with this software. If not, see . +import fnmatch + try: import shade from shade import meta @@ -25,36 +27,47 @@ except ImportError: DOCUMENTATION = ''' --- module: os_server_facts -short_description: Retrieve facts about a compute instance +short_description: Retrieve facts about one or more compute instances version_added: "2.0" -author: "Monty Taylor (@emonty)" description: - - Retrieve facts about a server instance from OpenStack. + - Retrieve facts about server instances from OpenStack. notes: - - Facts are placed in the C(openstack) variable. + - This module creates a new top-level C(openstack_servers) fact, which + contains a list of servers. requirements: - "python >= 2.6" - "shade" options: server: description: - - Name or ID of the instance - required: true + - restrict results to servers with names matching + this glob expression (e.g., C). + required: false + default: None + detailed: + description: + - when true, return additional detail about servers at the expense + of additional API calls. 
+ required: false + default: false extends_documentation_fragment: openstack ''' EXAMPLES = ''' -# Gather facts about a previously created server named vm1 +# Gather facts about all servers named C: - os_server_facts: cloud: rax-dfw - server: vm1 -- debug: var=openstack + server: web* +- debug: + var: openstack_servers ''' + def main(): argument_spec = openstack_full_argument_spec( - server=dict(required=True), + server=dict(required=False), + detailed=dict(required=False, type='bool'), ) module_kwargs = openstack_module_kwargs() module = AnsibleModule(argument_spec, **module_kwargs) @@ -64,10 +77,16 @@ def main(): try: cloud = shade.openstack_cloud(**module.params) - server = cloud.get_server(module.params['server']) - hostvars = dict(openstack=meta.get_hostvars_from_server( - cloud, server)) - module.exit_json(changed=False, ansible_facts=hostvars) + openstack_servers = cloud.list_servers( + detailed=module.params['detailed']) + + if module.params['server']: + # filter servers by name + pattern = module.params['server'] + openstack_servers = [server for server in openstack_servers + if fnmatch.fnmatch(server['name'], pattern)] + module.exit_json(changed=False, ansible_facts=dict( + openstack_servers=openstack_servers)) except shade.OpenStackCloudException as e: module.fail_json(msg=e.message) @@ -77,4 +96,3 @@ from ansible.module_utils.basic import * from ansible.module_utils.openstack import * if __name__ == '__main__': main() - From 57dcf2c9dd35fb535d77ca4fa30e9fed4be32472 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 28 Oct 2015 14:38:11 -0400 Subject: [PATCH 43/86] @lorin stepping down as maintainer --- cloud/amazon/_ec2_ami_search.py | 1 - cloud/amazon/ec2_eip.py | 1 - cloud/openstack/_keystone_user.py | 2 +- database/postgresql/postgresql_db.py | 2 +- database/postgresql/postgresql_user.py | 2 +- web_infrastructure/htpasswd.py | 2 +- 6 files changed, 4 insertions(+), 6 deletions(-) diff --git a/cloud/amazon/_ec2_ami_search.py 
b/cloud/amazon/_ec2_ami_search.py index 8ef0c0046ea..5add0260d80 100644 --- a/cloud/amazon/_ec2_ami_search.py +++ b/cloud/amazon/_ec2_ami_search.py @@ -66,7 +66,6 @@ options: default: paravirtual choices: ["paravirtual", "hvm"] -author: Lorin Hochstein ''' EXAMPLES = ''' diff --git a/cloud/amazon/ec2_eip.py b/cloud/amazon/ec2_eip.py index 020ec67a497..69d762c8c08 100644 --- a/cloud/amazon/ec2_eip.py +++ b/cloud/amazon/ec2_eip.py @@ -61,7 +61,6 @@ options: extends_documentation_fragment: - aws - ec2 -author: "Lorin Hochstein (@lorin) " author: "Rick Mendes (@rickmendes) " notes: - This module will return C(public_ip) on success, which will contain the diff --git a/cloud/openstack/_keystone_user.py b/cloud/openstack/_keystone_user.py index 48cc87b241a..9586b8b70a9 100644 --- a/cloud/openstack/_keystone_user.py +++ b/cloud/openstack/_keystone_user.py @@ -90,7 +90,7 @@ options: requirements: - "python >= 2.6" - python-keystoneclient -author: "Lorin Hochstein (@lorin)" +author: "Ansible Core Team (deprecated)" ''' EXAMPLES = ''' diff --git a/database/postgresql/postgresql_db.py b/database/postgresql/postgresql_db.py index 469d68fa0fa..762cb65e922 100644 --- a/database/postgresql/postgresql_db.py +++ b/database/postgresql/postgresql_db.py @@ -95,7 +95,7 @@ notes: - This module uses I(psycopg2), a Python PostgreSQL database adapter. You must ensure that psycopg2 is installed on the host before using this module. If the remote host is the PostgreSQL server (which is the default case), then PostgreSQL must also be installed on the remote host. For Ubuntu-based systems, install the C(postgresql), C(libpq-dev), and C(python-psycopg2) packages on the remote host before using this module. 
requirements: [ psycopg2 ] -author: "Lorin Hochstein (@lorin)" +author: "Ansible Core Team" ''' EXAMPLES = ''' diff --git a/database/postgresql/postgresql_user.py b/database/postgresql/postgresql_user.py index cee5a9ae131..4f2174330f6 100644 --- a/database/postgresql/postgresql_user.py +++ b/database/postgresql/postgresql_user.py @@ -137,7 +137,7 @@ notes: to all users. You may not specify password or role_attr_flags when the PUBLIC user is specified. requirements: [ psycopg2 ] -author: "Lorin Hochstein (@lorin)" +author: "Ansible Core Team" ''' EXAMPLES = ''' diff --git a/web_infrastructure/htpasswd.py b/web_infrastructure/htpasswd.py index 361a131ef2d..4253f1572ac 100644 --- a/web_infrastructure/htpasswd.py +++ b/web_infrastructure/htpasswd.py @@ -69,7 +69,7 @@ notes: - "On Debian, Ubuntu, or Fedora: install I(python-passlib)." - "On RHEL or CentOS: Enable EPEL, then install I(python-passlib)." requires: [ passlib>=1.6 ] -author: "Lorin Hochstein (@lorin)" +author: "Ansible Core Team" """ EXAMPLES = """ From a76184ad1f9bda1bcbbd334864a55c2871c10343 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 28 Oct 2015 14:39:20 -0400 Subject: [PATCH 44/86] @bradobro stepping down as maintainer --- system/authorized_key.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/authorized_key.py b/system/authorized_key.py index 361e68cb009..8a97722b222 100644 --- a/system/authorized_key.py +++ b/system/authorized_key.py @@ -82,7 +82,7 @@ options: version_added: "1.9" description: - "Adds or removes authorized keys for particular user accounts" -author: "Brad Olson (@bradobro)" +author: "Ansible Core Team" ''' EXAMPLES = ''' From 7cb9289197c906162457ac2fdcd6f2f8d0ef1d34 Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 28 Oct 2015 14:40:54 -0400 Subject: [PATCH 45/86] Clarity of owner, even though module is deprecated --- cloud/amazon/_ec2_ami_search.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cloud/amazon/_ec2_ami_search.py 
b/cloud/amazon/_ec2_ami_search.py index 5add0260d80..a85bdf00223 100644 --- a/cloud/amazon/_ec2_ami_search.py +++ b/cloud/amazon/_ec2_ami_search.py @@ -66,6 +66,7 @@ options: default: paravirtual choices: ["paravirtual", "hvm"] +author: "Ansible Core Team (deprecated)" ''' EXAMPLES = ''' From 344cf5fc0e2c8637fe9513206b2c843ca60264cf Mon Sep 17 00:00:00 2001 From: Greg DeKoenigsberg Date: Wed, 28 Oct 2015 14:47:03 -0400 Subject: [PATCH 46/86] Remove @ralph-tice from maintainership per his request --- cloud/amazon/s3.py | 1 - 1 file changed, 1 deletion(-) diff --git a/cloud/amazon/s3.py b/cloud/amazon/s3.py index fdeaafd58bd..ada5cd51c84 100644 --- a/cloud/amazon/s3.py +++ b/cloud/amazon/s3.py @@ -146,7 +146,6 @@ options: requirements: [ "boto" ] author: - "Lester Wade (@lwade)" - - "Ralph Tice (@ralph-tice)" extends_documentation_fragment: aws ''' From 2dd7ac7a4569039087a4d364977457d7daf69aa4 Mon Sep 17 00:00:00 2001 From: "wtanaka.com" Date: Wed, 28 Oct 2015 10:35:51 -1000 Subject: [PATCH 47/86] Update target parameter documentation xz support was only added recently (2015-05-15), so referring to it unqualified in the online documentation is confusing. --- database/mysql/mysql_db.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/database/mysql/mysql_db.py b/database/mysql/mysql_db.py index 33720f5d4f6..22ae4157b4b 100644 --- a/database/mysql/mysql_db.py +++ b/database/mysql/mysql_db.py @@ -79,7 +79,7 @@ options: target: description: - Location, on the remote host, of the dump file to read from or write to. Uncompressed SQL - files (C(.sql)) as well as bzip2 (C(.bz2)), gzip (C(.gz)) and xz compressed files are supported. + files (C(.sql)) as well as bzip2 (C(.bz2)), gzip (C(.gz)) and xz (Added in 2.0) compressed files are supported. required: false notes: - Requires the MySQLdb Python package on the remote host. 
For Ubuntu, this From 7f59773460d79b3dae34c375ba68caea1bfc09a8 Mon Sep 17 00:00:00 2001 From: Ales Nosek Date: Wed, 28 Oct 2015 22:04:32 -0700 Subject: [PATCH 48/86] ini_file should only change what was specified and nothing more #5860 See also: http://alesnosek.com/blog/2015/08/03/improving-ansibles-ini-file-module/ --- files/ini_file.py | 127 +++++++++++++++++++++++----------------------- 1 file changed, 63 insertions(+), 64 deletions(-) diff --git a/files/ini_file.py b/files/ini_file.py index cb3edb2cff2..5d6df779cf0 100644 --- a/files/ini_file.py +++ b/files/ini_file.py @@ -2,6 +2,7 @@ # -*- coding: utf-8 -*- # (c) 2012, Jan-Piet Mens +# (c) 2015, Ales Nosek # # This file is part of Ansible # @@ -28,7 +29,7 @@ description: - Manage (add, remove, change) individual settings in an INI-style file without having to manage the file as a whole with, say, M(template) or M(assemble). Adds missing sections if they don't exist. - - Comments are discarded when the source file is read, and therefore will not + - Comments are discarded when the source file is read, and therefore will not show up in the destination file. version_added: "0.9" options: @@ -79,7 +80,7 @@ notes: Either use M(template) to create a base INI file with a C([default]) section, or use M(lineinfile) to add the missing line. 
requirements: [ ConfigParser ] -author: "Jan-Piet Mens (@jpmens)" +author: "Jan-Piet Mens (@jpmens), Ales Nosek" ''' EXAMPLES = ''' @@ -101,79 +102,77 @@ import sys def do_ini(module, filename, section=None, option=None, value=None, state='present', backup=False): - changed = False - if (sys.version_info[0] == 2 and sys.version_info[1] >= 7) or sys.version_info[0] >= 3: - cp = ConfigParser.ConfigParser(allow_no_value=True) - else: - cp = ConfigParser.ConfigParser() - cp.optionxform = identity - - try: - f = open(filename) - cp.readfp(f) - except IOError: - pass - - - if state == 'absent': - if option is None: - changed = cp.remove_section(section) - else: - try: - changed = cp.remove_option(section, option) - except ConfigParser.NoSectionError: - # Option isn't present if the section isn't either - pass - - if state == 'present': - # DEFAULT section is always there by DEFAULT, so never try to add it. - if not cp.has_section(section) and section.upper() != 'DEFAULT': + with open(filename, 'r') as ini_file: + ini_lines = ini_file.readlines() + # append a fake section line to simplify the logic + ini_lines.append('[') - cp.add_section(section) - changed = True + within_section = not section + section_start = 0 + changed = False - if option is not None and value is not None: - try: - oldvalue = cp.get(section, option) - if str(value) != str(oldvalue): - cp.set(section, option, value) + for index, line in enumerate(ini_lines): + if line.startswith('[%s]' % section): + within_section = True + section_start = index + elif line.startswith('['): + if within_section: + if state == 'present': + # insert missing option line at the end of the section + ini_lines.insert(index, '%s = %s\n' % (option, value)) + changed = True + elif state == 'absent' and not option: + # remove the entire section + del ini_lines[section_start:index] changed = True - except ConfigParser.NoSectionError: - cp.set(section, option, value) - changed = True - except ConfigParser.NoOptionError: - 
cp.set(section, option, value) - changed = True - except ConfigParser.InterpolationError: - cp.set(section, option, value) - changed = True + break + else: + if within_section and option: + if state == 'present': + # change the existing option line + if re.match('%s *=' % option, line) \ + or re.match('# *%s *=' % option, line) \ + or re.match('; *%s *=' % option, line): + newline = '%s = %s\n' % (option, value) + changed = ini_lines[index] != newline + ini_lines[index] = newline + if changed: + # remove all possible option occurences from the rest of the section + index = index + 1 + while index < len(ini_lines): + line = ini_lines[index] + if line.startswith('['): + break + if re.match('%s *=' % option, line): + del ini_lines[index] + else: + index = index + 1 + break + else: + # comment out the existing option line + if re.match('%s *=' % option, line): + ini_lines[index] = '#%s' % ini_lines[index] + changed = True + break + + # remove the fake section line + del ini_lines[-1:] + + if not within_section and option and state == 'present': + ini_lines.append('[%s]\n' % section) + ini_lines.append('%s = %s\n' % (option, value)) + changed = True + if changed and not module.check_mode: if backup: module.backup_local(filename) - - try: - f = open(filename, 'w') - cp.write(f) - except: - module.fail_json(msg="Can't create %s" % filename) + with open(filename, 'w') as ini_file: + ini_file.writelines(ini_lines) return changed -# ============================================================== -# identity - -def identity(arg): - """ - This function simply returns its argument. It serves as a - replacement for ConfigParser.optionxform, which by default - changes arguments to lower case. The identity function is a - better choice than str() or unicode(), because it is - encoding-agnostic. 
- """ - return arg - # ============================================================== # main From 22790d301a41df78eb756c8e0da9526984c68475 Mon Sep 17 00:00:00 2001 From: Ales Nosek Date: Fri, 30 Oct 2015 21:57:25 -0700 Subject: [PATCH 49/86] Make the syntax work with Python 2.4 --- files/ini_file.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/files/ini_file.py b/files/ini_file.py index 5d6df779cf0..d837c329d4b 100644 --- a/files/ini_file.py +++ b/files/ini_file.py @@ -103,10 +103,13 @@ import sys def do_ini(module, filename, section=None, option=None, value=None, state='present', backup=False): - with open(filename, 'r') as ini_file: + ini_file = open(filename, 'r') + try: ini_lines = ini_file.readlines() # append a fake section line to simplify the logic ini_lines.append('[') + finally: + ini_file.close() within_section = not section section_start = 0 @@ -168,8 +171,11 @@ def do_ini(module, filename, section=None, option=None, value=None, state='prese if changed and not module.check_mode: if backup: module.backup_local(filename) - with open(filename, 'w') as ini_file: + ini_file = open(filename, 'w') + try: ini_file.writelines(ini_lines) + finally: + ini_file.close() return changed From 89957eed537b08001ea171ba6a4ead41d31ab983 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 31 Oct 2015 14:24:32 -0400 Subject: [PATCH 50/86] document mysql collation can only be set during creation --- database/mysql/mysql_db.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/database/mysql/mysql_db.py b/database/mysql/mysql_db.py index 33720f5d4f6..8927a1bc652 100644 --- a/database/mysql/mysql_db.py +++ b/database/mysql/mysql_db.py @@ -68,7 +68,7 @@ options: choices: [ "present", "absent", "dump", "import" ] collation: description: - - Collation mode + - Collation mode (sorting). This only applies to new table/databases and does not update existing ones, this is a limitation of MySQL. 
required: false default: null encoding: From d192e2c3e32575713d94a8f7fd19c4d9980a0e90 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 31 Oct 2015 21:35:48 -0400 Subject: [PATCH 51/86] code cleanup and reoorg, renamed vars and functions to actual purpose reneabled logging of steps --- utilities/logic/async_wrapper.py | 214 ++++++++++++++++--------------- 1 file changed, 108 insertions(+), 106 deletions(-) diff --git a/utilities/logic/async_wrapper.py b/utilities/logic/async_wrapper.py index 2bc2dc21823..55f5283ed79 100644 --- a/utilities/logic/async_wrapper.py +++ b/utilities/logic/async_wrapper.py @@ -27,15 +27,20 @@ import shlex import os import subprocess import sys -import datetime import traceback import signal import time import syslog + +syslog.openlog('ansible-%s' % os.path.basename(__file__)) +syslog.syslog(syslog.LOG_NOTICE, 'Invoked with %s' % " ".join(sys.argv[1:])) + +def notice(msg): + syslog.syslog(syslog.LOG_NOTICE, msg) + def daemonize_self(): # daemonizing code: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66012 - # logger.info("cobblerd started") try: pid = os.fork() if pid > 0: @@ -65,50 +70,21 @@ def daemonize_self(): os.dup2(dev_null.fileno(), sys.stdout.fileno()) os.dup2(dev_null.fileno(), sys.stderr.fileno()) -if len(sys.argv) < 3: - print json.dumps({ - "failed" : True, - "msg" : "usage: async_wrapper . Humans, do not call directly!" 
- }) - sys.exit(1) - -jid = "%s.%d" % (sys.argv[1], os.getpid()) -time_limit = sys.argv[2] -wrapped_module = sys.argv[3] -argsfile = sys.argv[4] -cmd = "%s %s" % (wrapped_module, argsfile) -syslog.openlog('ansible-%s' % os.path.basename(__file__)) -syslog.syslog(syslog.LOG_NOTICE, 'Invoked with %s' % " ".join(sys.argv[1:])) - -# setup logging directory -logdir = os.path.expanduser("~/.ansible_async") -log_path = os.path.join(logdir, jid) - -if not os.path.exists(logdir): - try: - os.makedirs(logdir) - except: - print json.dumps({ - "failed" : 1, - "msg" : "could not create: %s" % logdir - }) +def _run_module(wrapped_cmd, jid, job_path): -def _run_command(wrapped_cmd, jid, log_path): - - logfile = open(log_path, "w") - logfile.write(json.dumps({ "started" : 1, "ansible_job_id" : jid })) - logfile.close() - logfile = open(log_path, "w") + jobfile = open(job_path, "w") + jobfile.write(json.dumps({ "started" : 1, "ansible_job_id" : jid })) + jobfile.close() + jobfile = open(job_path, "w") result = {} outdata = '' try: cmd = shlex.split(wrapped_cmd) - script = subprocess.Popen(cmd, shell=False, - stdin=None, stdout=logfile, stderr=logfile) + script = subprocess.Popen(cmd, shell=False, stdin=None, stdout=jobfile, stderr=jobfile) script.communicate() - outdata = file(log_path).read() + outdata = file(job_path).read() result = json.loads(outdata) except (OSError, IOError), e: @@ -118,83 +94,109 @@ def _run_command(wrapped_cmd, jid, log_path): "msg": str(e), } result['ansible_job_id'] = jid - logfile.write(json.dumps(result)) + jobfile.write(json.dumps(result)) except: result = { "failed" : 1, "cmd" : wrapped_cmd, - "data" : outdata, # temporary debug only + "data" : outdata, # temporary notice only "msg" : traceback.format_exc() } result['ansible_job_id'] = jid - logfile.write(json.dumps(result)) - logfile.close() + jobfile.write(json.dumps(result)) + jobfile.close() -# immediately exit this process, leaving an orphaned process -# running which immediately forks a 
supervisory timing process -#import logging -#import logging.handlers +#################### +## main ## +#################### +if __name__ == '__main__': -#logger = logging.getLogger("ansible_async") -#logger.setLevel(logging.WARNING) -#logger.addHandler( logging.handlers.SysLogHandler("/dev/log") ) -def debug(msg): - #logger.warning(msg) - pass + if len(sys.argv) < 3: + print json.dumps({ + "failed" : True, + "msg" : "usage: async_wrapper . Humans, do not call directly!" + }) + sys.exit(1) -try: - pid = os.fork() - if pid: - # Notify the overlord that the async process started - - # we need to not return immmediately such that the launched command has an attempt - # to initialize PRIOR to ansible trying to clean up the launch directory (and argsfile) - # this probably could be done with some IPC later. Modules should always read - # the argsfile at the very first start of their execution anyway - time.sleep(1) - debug("Return async_wrapper task started.") - print json.dumps({ "started" : 1, "ansible_job_id" : jid, "results_file" : log_path }) - sys.stdout.flush() - sys.exit(0) - else: - # The actual wrapper process - - # Daemonize, so we keep on running - daemonize_self() - - # we are now daemonized, create a supervisory process - debug("Starting module and watcher") - - sub_pid = os.fork() - if sub_pid: - # the parent stops the process after the time limit - remaining = int(time_limit) - - # set the child process group id to kill all children - os.setpgid(sub_pid, sub_pid) - - debug("Start watching %s (%s)"%(sub_pid, remaining)) - time.sleep(5) - while os.waitpid(sub_pid, os.WNOHANG) == (0, 0): - debug("%s still running (%s)"%(sub_pid, remaining)) - time.sleep(5) - remaining = remaining - 5 - if remaining <= 0: - debug("Now killing %s"%(sub_pid)) - os.killpg(sub_pid, signal.SIGKILL) - debug("Sent kill to group %s"%sub_pid) - time.sleep(1) - sys.exit(0) - debug("Done in kid B.") - os._exit(0) - else: - # the child process runs the actual module - debug("Start 
module (%s)"%os.getpid()) - _run_command(cmd, jid, log_path) - debug("Module complete (%s)"%os.getpid()) - sys.exit(0) + jid = "%s.%d" % (sys.argv[1], os.getpid()) + time_limit = sys.argv[2] + wrapped_module = sys.argv[3] + argsfile = sys.argv[4] + cmd = "%s %s" % (wrapped_module, argsfile) + step = 5 + + # setup job output directory + jobdir = os.path.expanduser("~/.ansible_async") + job_path = os.path.join(jobdir, jid) + + if not os.path.exists(jobdir): + try: + os.makedirs(jobdir) + except: + print json.dumps({ + "failed" : 1, + "msg" : "could not create: %s" % jobdir + }) + # immediately exit this process, leaving an orphaned process + # running which immediately forks a supervisory timing process -except Exception, err: - debug("error: %s"%(err)) - raise err + try: + pid = os.fork() + if pid: + # Notify the overlord that the async process started + + # we need to not return immmediately such that the launched command has an attempt + # to initialize PRIOR to ansible trying to clean up the launch directory (and argsfile) + # this probably could be done with some IPC later. 
Modules should always read + # the argsfile at the very first start of their execution anyway + notice("Return async_wrapper task started.") + print json.dumps({ "started" : 1, "ansible_job_id" : jid, "results_file" : job_path }) + sys.stdout.flush() + time.sleep(1) + sys.exit(0) + else: + # The actual wrapper process + + # Daemonize, so we keep on running + daemonize_self() + + # we are now daemonized, create a supervisory process + notice("Starting module and watcher") + + sub_pid = os.fork() + if sub_pid: + # the parent stops the process after the time limit + remaining = int(time_limit) + + # set the child process group id to kill all children + os.setpgid(sub_pid, sub_pid) + + notice("Start watching %s (%s)"%(sub_pid, remaining)) + time.sleep(step) + while os.waitpid(sub_pid, os.WNOHANG) == (0, 0): + notice("%s still running (%s)"%(sub_pid, remaining)) + time.sleep(step) + remaining = remaining - step + if remaining <= 0: + notice("Now killing %s"%(sub_pid)) + os.killpg(sub_pid, signal.SIGKILL) + notice("Sent kill to group %s"%sub_pid) + time.sleep(1) + sys.exit(0) + notice("Done in kid B.") + sys.exit(0) + else: + # the child process runs the actual module + notice("Start module (%s)"%os.getpid()) + _run_module(cmd, jid, job_path) + notice("Module complete (%s)"%os.getpid()) + sys.exit(0) + + except Exception, err: + notice("error: %s"%(err)) + print json.dumps({ + "failed" : True, + "msg" : "FATAL ERROR: %s" % str(err) + }) + sys.exit(1) From bf5929d32a877bf4b9f59cf0072efa7cbdf9bf25 Mon Sep 17 00:00:00 2001 From: Michael Schuett Date: Tue, 30 Jun 2015 02:25:28 -0400 Subject: [PATCH 52/86] docker_image TLS Check commit enables using tls when using the docker_image module. It also removes the default for docker_url which doesn't allow us to check for DOCKER_HOST which is a more sane default. This allows you to use docker_image on OSX but more documentation is needed. 
--- cloud/docker/docker_image.py | 91 ++++++++++++++++++++++++++++++++++-- 1 file changed, 88 insertions(+), 3 deletions(-) diff --git a/cloud/docker/docker_image.py b/cloud/docker/docker_image.py index e6cfd87ab43..92aaa44a499 100644 --- a/cloud/docker/docker_image.py +++ b/cloud/docker/docker_image.py @@ -118,6 +118,7 @@ Remove image from local docker storage: ''' import re +import os from urlparse import urlparse try: @@ -161,11 +162,90 @@ class DockerImageManager: self.name = self.module.params.get('name') self.tag = self.module.params.get('tag') self.nocache = self.module.params.get('nocache') - docker_url = urlparse(module.params.get('docker_url')) + + # Connect to the docker server using any configured host and TLS settings. + + env_host = os.getenv('DOCKER_HOST') + env_docker_verify = os.getenv('DOCKER_TLS_VERIFY') + env_cert_path = os.getenv('DOCKER_CERT_PATH') + env_docker_hostname = os.getenv('DOCKER_TLS_HOSTNAME') + + docker_url = module.params.get('docker_url') + if not docker_url: + if env_host: + docker_url = env_host + else: + docker_url = 'unix://var/run/docker.sock' + + docker_api_version = module.params.get('docker_api_version') + + tls_client_cert = module.params.get('tls_client_cert', None) + if not tls_client_cert and env_cert_path: + tls_client_cert = os.path.join(env_cert_path, 'cert.pem') + + tls_client_key = module.params.get('tls_client_key', None) + if not tls_client_key and env_cert_path: + tls_client_key = os.path.join(env_cert_path, 'key.pem') + + tls_ca_cert = module.params.get('tls_ca_cert') + if not tls_ca_cert and env_cert_path: + tls_ca_cert = os.path.join(env_cert_path, 'ca.pem') + + tls_hostname = module.params.get('tls_hostname') + if tls_hostname is None: + if env_docker_hostname: + tls_hostname = env_docker_hostname + else: + parsed_url = urlparse(docker_url) + if ':' in parsed_url.netloc: + tls_hostname = parsed_url.netloc[:parsed_url.netloc.rindex(':')] + else: + tls_hostname = parsed_url + if not tls_hostname: + 
tls_hostname = True + + # use_tls can be one of four values: + # no: Do not use tls + # encrypt: Use tls. We may do client auth. We will not verify the server + # verify: Use tls. We may do client auth. We will verify the server + # None: Only use tls if the parameters for client auth were specified + # or tls_ca_cert (which requests verifying the server with + # a specific ca certificate) + use_tls = module.params.get('use_tls') + if use_tls is None and env_docker_verify is not None: + use_tls = 'verify' + + tls_config = None + if use_tls != 'no': + params = {} + + # Setup client auth + if tls_client_cert and tls_client_key: + params['client_cert'] = (tls_client_cert, tls_client_key) + + # We're allowed to verify the connection to the server + if use_tls == 'verify' or (use_tls is None and tls_ca_cert): + if tls_ca_cert: + params['ca_cert'] = tls_ca_cert + params['verify'] = True + params['assert_hostname'] = tls_hostname + else: + params['verify'] = True + params['assert_hostname'] = tls_hostname + elif use_tls == 'encrypt': + params['verify'] = False + + if params: + # See https://github.com/docker/docker-py/blob/d39da11/docker/utils/utils.py#L279-L296 + docker_url = docker_url.replace('tcp://', 'https://') + tls_config = docker.tls.TLSConfig(**params) + self.client = docker.Client( base_url=docker_url.geturl(), version=module.params.get('docker_api_version'), - timeout=module.params.get('timeout')) + timeout=module.params.get('timeout'), + tls=tls_config) + self.changed = False self.log = [] self.error_msg = None @@ -244,7 +324,12 @@ def main(): tag = dict(required=False, default="latest"), nocache = dict(default=False, type='bool'), state = dict(default='present', choices=['absent', 'present', 'build']), - docker_url = dict(default='unix://var/run/docker.sock'), + use_tls = dict(default=None, choices=['no', 'encrypt', 'verify']), + tls_client_cert = dict(required=False, default=None, type='str'), + tls_client_key = dict(required=False, default=None, 
type='str'), + tls_ca_cert = dict(required=False, default=None, type='str'), + tls_hostname = dict(required=False, type='str', default=None), + docker_url = dict(), docker_api_version = dict(required=False, default=DEFAULT_DOCKER_API_VERSION, type='str'), From addbc329beb74b7d0561960d914294824dac9eeb Mon Sep 17 00:00:00 2001 From: Michael Schuett Date: Wed, 1 Jul 2015 19:41:17 -0400 Subject: [PATCH 53/86] Improve Error Reporting This will hopefully help mac users be able to quickly resolve any issues they may find when trying to use this module. --- cloud/docker/docker_image.py | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/cloud/docker/docker_image.py b/cloud/docker/docker_image.py index 92aaa44a499..20776ee139c 100644 --- a/cloud/docker/docker_image.py +++ b/cloud/docker/docker_image.py @@ -371,6 +371,29 @@ def main(): module.exit_json(failed=failed, changed=manager.has_changed(), msg=msg, image_id=image_id) + except SSLError as e: + if get_platform() == "Darwin" and "DOCKER_HOST" in os.environ: + # Ensure that the environment variables has been set + if "DOCKER_HOST" not in os.environ: + environment_error = ''' + It looks like you have not set your docker environment + variables. Please ensure that you have set the requested + variables as instructed when running boot2docker up. If + they are set in .bash_profile you will need to symlink + it to .bashrc. + ''' + module.exit_json(failed=True, chaged=manager.has_changed(), msg="SSLError: " + str(e) + environment_error) + # If the above is true it's likely the hostname does not match + else: + environment_error = ''' + You may need to ignore hostname missmatches by passing + -e 'host_key_checking=False' through the command line. 
+ ''' + module.exit_json(failed=True, chaged=manager.has_changed(), msg="SSLError: " + str(e) + environment_error) + # General error for non darwin users + else: + module.exit_json(failed=True, chaged=manager.has_changed(), msg="SSLError: " + str(e)) + except DockerAPIError as e: module.exit_json(failed=True, changed=manager.has_changed(), msg="Docker API error: " + e.explanation) From 0a5b7087bdac11f8eab76b94098d1f1928341851 Mon Sep 17 00:00:00 2001 From: Michael Schuett Date: Wed, 1 Jul 2015 19:43:26 -0400 Subject: [PATCH 54/86] Improve Message Give user a course of action in the case where the suggestions do not work. This will hopefully allow us to work through any further issues much faster. --- cloud/docker/docker_image.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cloud/docker/docker_image.py b/cloud/docker/docker_image.py index 20776ee139c..bdc31e71c99 100644 --- a/cloud/docker/docker_image.py +++ b/cloud/docker/docker_image.py @@ -388,6 +388,8 @@ def main(): environment_error = ''' You may need to ignore hostname missmatches by passing -e 'host_key_checking=False' through the command line. + If this does not resolve the issue please open an issue + at ansible/ansible-modules-core and ping michaeljs1990 ''' module.exit_json(failed=True, chaged=manager.has_changed(), msg="SSLError: " + str(e) + environment_error) # General error for non darwin users From 1e8d20b0dae125f504e1cccdaef63c4aecd87f16 Mon Sep 17 00:00:00 2001 From: Michael Schuett Date: Wed, 1 Jul 2015 20:15:23 -0400 Subject: [PATCH 55/86] Documentation Fix Updated documentation to match current module state. 
--- cloud/docker/docker_image.py | 37 +++++++++++++++++++++++++++++++++++- 1 file changed, 36 insertions(+), 1 deletion(-) diff --git a/cloud/docker/docker_image.py b/cloud/docker/docker_image.py index bdc31e71c99..4498998e8fe 100644 --- a/cloud/docker/docker_image.py +++ b/cloud/docker/docker_image.py @@ -63,8 +63,43 @@ options: description: - URL of docker host to issue commands to required: false - default: unix://var/run/docker.sock + default: ${DOCKER_HOST} or unix://var/run/docker.sock aliases: [] + use_tls: + description: + - Whether to use tls to connect to the docker server. "no" means not to + use tls (and ignore any other tls related parameters). "encrypt" means + to use tls to encrypt the connection to the server. "verify" means to + also verify that the server's certificate is valid for the server + (this both verifies the certificate against the CA and that the + certificate was issued for that host. If this is unspecified, tls will + only be used if one of the other tls options require it. + choices: [ "no", "encrypt", "verify" ] + version_added: "1.9" + tls_client_cert: + description: + - Path to the PEM-encoded certificate used to authenticate docker client. + If specified tls_client_key must be valid + default: ${DOCKER_CERT_PATH}/cert.pem + version_added: "1.9" + tls_client_key: + description: + - Path to the PEM-encoded key used to authenticate docker client. If + specified tls_client_cert must be valid + default: ${DOCKER_CERT_PATH}/key.pem + version_added: "1.9" + tls_ca_cert: + description: + - Path to a PEM-encoded certificate authority to secure the Docker connection. + This has no effect if use_tls is encrypt. + default: ${DOCKER_CERT_PATH}/ca.pem + version_added: "1.9" + tls_hostname: + description: + - A hostname to check matches what's supplied in the docker server's + certificate. If unspecified, the hostname is taken from the docker_url. 
+ default: Taken from docker_url + version_added: "1.9" docker_api_version: description: - Remote API version to use. This defaults to the current default as From c1264988996fad7d788c07a806a132a9b9ad1761 Mon Sep 17 00:00:00 2001 From: Michael Schuett Date: Thu, 2 Jul 2015 11:45:29 -0400 Subject: [PATCH 56/86] Remove faulty logic Update logic after splitting the error into two separate messages. --- cloud/docker/docker_image.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/docker/docker_image.py b/cloud/docker/docker_image.py index 4498998e8fe..1800dfa28d9 100644 --- a/cloud/docker/docker_image.py +++ b/cloud/docker/docker_image.py @@ -407,7 +407,7 @@ def main(): module.exit_json(failed=failed, changed=manager.has_changed(), msg=msg, image_id=image_id) except SSLError as e: - if get_platform() == "Darwin" and "DOCKER_HOST" in os.environ: + if get_platform() == "Darwin": # Ensure that the environment variables has been set if "DOCKER_HOST" not in os.environ: environment_error = ''' From 1dcb31cad6ae0805ee463228f83973a004e3c7ab Mon Sep 17 00:00:00 2001 From: Michael Schuett Date: Tue, 7 Jul 2015 09:44:27 -0400 Subject: [PATCH 57/86] remove .geturl() Can't call geturl on a string. --- cloud/docker/docker_image.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/docker/docker_image.py b/cloud/docker/docker_image.py index 1800dfa28d9..6f41755c929 100644 --- a/cloud/docker/docker_image.py +++ b/cloud/docker/docker_image.py @@ -276,7 +276,7 @@ class DockerImageManager: tls_config = docker.tls.TLSConfig(**params) self.client = docker.Client( - base_url=docker_url.geturl(), + base_url=docker_url, version=module.params.get('docker_api_version'), timeout=module.params.get('timeout'), tls=tls_config) From b0357bf9e8f27cad04bd3882acab215e0463a0a5 Mon Sep 17 00:00:00 2001 From: Michael Schuett Date: Tue, 7 Jul 2015 15:36:10 -0400 Subject: [PATCH 58/86] Handle connection error Try and help when mac hits a connection error. 
--- cloud/docker/docker_image.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/cloud/docker/docker_image.py b/cloud/docker/docker_image.py index 6f41755c929..3807c963de1 100644 --- a/cloud/docker/docker_image.py +++ b/cloud/docker/docker_image.py @@ -431,6 +431,20 @@ def main(): else: module.exit_json(failed=True, chaged=manager.has_changed(), msg="SSLError: " + str(e)) + except ConnectionError as e: + if get_platform() == "Darwin" and "DOCKER_HOST" not in os.environ: + # Ensure that the environment variables has been set + environment_error = ''' + It looks like you have not set your docker environment + variables. Please ensure that you have set the requested + variables as instructed when running boot2docker up. If + they are set in .bash_profile you will need to symlink + it to .bashrc. + ''' + module.exit_json(failed=True, chaged=manager.has_changed(), msg="ConnectionError: " + str(e) + environment_error) + + module.exit_json(failed=True, chaged=manager.has_changed(), msg="ConnectionError: " + str(e)) + except DockerAPIError as e: module.exit_json(failed=True, changed=manager.has_changed(), msg="Docker API error: " + e.explanation) From 80c2e28a48c487caf3f133af81d1d94f410809ea Mon Sep 17 00:00:00 2001 From: Michael Schuett Date: Tue, 7 Jul 2015 15:37:36 -0400 Subject: [PATCH 59/86] Fix message Previous fix did not actual work. This fix does however. --- cloud/docker/docker_image.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/cloud/docker/docker_image.py b/cloud/docker/docker_image.py index 3807c963de1..327349dc683 100644 --- a/cloud/docker/docker_image.py +++ b/cloud/docker/docker_image.py @@ -421,10 +421,10 @@ def main(): # If the above is true it's likely the hostname does not match else: environment_error = ''' - You may need to ignore hostname missmatches by passing - -e 'host_key_checking=False' through the command line. 
- If this does not resolve the issue please open an issue - at ansible/ansible-modules-core and ping michaeljs1990 + You may need to ignore hostname missmatches by setting + tls_hostname=boot2docker in your role. If this does not + resolve the issue please open an issue at + ansible/ansible-modules-core and ping michaeljs1990 ''' module.exit_json(failed=True, chaged=manager.has_changed(), msg="SSLError: " + str(e) + environment_error) # General error for non darwin users From e318be30ff0a4c19e61ff15437ff96a6ba57f696 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 2 Nov 2015 10:43:33 -0800 Subject: [PATCH 60/86] Fix issues version_added and chaged => changed typo --- cloud/docker/docker_image.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/cloud/docker/docker_image.py b/cloud/docker/docker_image.py index 327349dc683..a2d0143e284 100644 --- a/cloud/docker/docker_image.py +++ b/cloud/docker/docker_image.py @@ -75,31 +75,31 @@ options: certificate was issued for that host. If this is unspecified, tls will only be used if one of the other tls options require it. choices: [ "no", "encrypt", "verify" ] - version_added: "1.9" + version_added: "2.0" tls_client_cert: description: - Path to the PEM-encoded certificate used to authenticate docker client. If specified tls_client_key must be valid default: ${DOCKER_CERT_PATH}/cert.pem - version_added: "1.9" + version_added: "2.0" tls_client_key: description: - Path to the PEM-encoded key used to authenticate docker client. If specified tls_client_cert must be valid default: ${DOCKER_CERT_PATH}/key.pem - version_added: "1.9" + version_added: "2.0" tls_ca_cert: description: - Path to a PEM-encoded certificate authority to secure the Docker connection. This has no effect if use_tls is encrypt. 
default: ${DOCKER_CERT_PATH}/ca.pem - version_added: "1.9" + version_added: "2.0" tls_hostname: description: - A hostname to check matches what's supplied in the docker server's certificate. If unspecified, the hostname is taken from the docker_url. default: Taken from docker_url - version_added: "1.9" + version_added: "2.0" docker_api_version: description: - Remote API version to use. This defaults to the current default as @@ -417,7 +417,7 @@ def main(): they are set in .bash_profile you will need to symlink it to .bashrc. ''' - module.exit_json(failed=True, chaged=manager.has_changed(), msg="SSLError: " + str(e) + environment_error) + module.exit_json(failed=True, changed=manager.has_changed(), msg="SSLError: " + str(e) + environment_error) # If the above is true it's likely the hostname does not match else: environment_error = ''' @@ -426,10 +426,10 @@ def main(): resolve the issue please open an issue at ansible/ansible-modules-core and ping michaeljs1990 ''' - module.exit_json(failed=True, chaged=manager.has_changed(), msg="SSLError: " + str(e) + environment_error) + module.exit_json(failed=True, changed=manager.has_changed(), msg="SSLError: " + str(e) + environment_error) # General error for non darwin users else: - module.exit_json(failed=True, chaged=manager.has_changed(), msg="SSLError: " + str(e)) + module.exit_json(failed=True, changed=manager.has_changed(), msg="SSLError: " + str(e)) except ConnectionError as e: if get_platform() == "Darwin" and "DOCKER_HOST" not in os.environ: @@ -441,9 +441,9 @@ def main(): they are set in .bash_profile you will need to symlink it to .bashrc. 
''' - module.exit_json(failed=True, chaged=manager.has_changed(), msg="ConnectionError: " + str(e) + environment_error) + module.exit_json(failed=True, changed=manager.has_changed(), msg="ConnectionError: " + str(e) + environment_error) - module.exit_json(failed=True, chaged=manager.has_changed(), msg="ConnectionError: " + str(e)) + module.exit_json(failed=True, changed=manager.has_changed(), msg="ConnectionError: " + str(e)) except DockerAPIError as e: module.exit_json(failed=True, changed=manager.has_changed(), msg="Docker API error: " + e.explanation) From baafcfc091d09f69ef51a9b1cd36ee2fc5169f83 Mon Sep 17 00:00:00 2001 From: Harlan Lieberman-Berg Date: Sat, 15 Aug 2015 11:40:00 +0200 Subject: [PATCH 61/86] Change behavior of apt.py around installing recommended packages. Closes #1189. This will cause the settings in Ansible to override the system settings. That will have no effect except on systems that have an out-of-Ansible configuration that disables automatic installation of recommended packages. Previously, ansible would use the OS default whenever install_recommends wasn't part of the playbook. This change will cause the Ansible default configuration setting of installing recommended packages to override the configuration files set on the OS for things installed through ansible, even when there is no install_recommends specified in the playbook. Because the OS default matches the Ansible default, this shouldn't have wide impact. 
--- packaging/os/apt.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/packaging/os/apt.py b/packaging/os/apt.py index d99eb85ff7e..7198d934ca2 100755 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -231,7 +231,7 @@ def package_status(m, pkgname, version, cache, state): provided_packages = cache.get_providing_packages(pkgname) if provided_packages: is_installed = False - # when virtual package providing only one package, look up status of target package + # when virtual package providing only one package, look up status of target package if cache.is_virtual_package(pkgname) and len(provided_packages) == 1: package = provided_packages[0] installed, upgradable, has_files = package_status(m, package.name, version, cache, state='install') @@ -386,7 +386,9 @@ def install(m, pkgspec, cache, upgrade=False, default_release=None, if default_release: cmd += " -t '%s'" % (default_release,) if not install_recommends: - cmd += " --no-install-recommends" + cmd += " -o APT::Install-Recommends=no" + else: + cmd += " -o APT::Install-Recommends=yes" rc, out, err = m.run_command(cmd) if rc: From a234e9b7b2a7b13400023dd3b703ca41f8163715 Mon Sep 17 00:00:00 2001 From: Harlan Lieberman-Berg Date: Sat, 15 Aug 2015 18:41:42 +0200 Subject: [PATCH 62/86] Change install_recommended in apt to a trinary. Conditions are now "yes", "no", and "default", with the latter falling back to the OS default. --- packaging/os/apt.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/packaging/os/apt.py b/packaging/os/apt.py index 7198d934ca2..fe8bbfff00a 100755 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -62,7 +62,7 @@ options: default: null install_recommends: description: - - Corresponds to the C(--no-install-recommends) option for I(apt). Default behavior (C(yes)) replicates apt's default behavior; C(no) does not install recommended packages. Suggested packages are never installed. 
+ - Corresponds to the C(--no-install-recommends) option for I(apt). C(yes) installs recommended packages. C(no) does not install recommended packages. By default, Ansible will use the same defaults as the operating system. Suggested packages are never installed. required: false default: yes choices: [ "yes", "no" ] @@ -339,7 +339,7 @@ def expand_pkgspec_from_fnmatches(m, pkgspec, cache): return new_pkgspec def install(m, pkgspec, cache, upgrade=False, default_release=None, - install_recommends=True, force=False, + install_recommends, force=False, dpkg_options=expand_dpkg_options(DPKG_OPTIONS), build_dep=False): pkg_list = [] @@ -385,9 +385,9 @@ def install(m, pkgspec, cache, upgrade=False, default_release=None, if default_release: cmd += " -t '%s'" % (default_release,) - if not install_recommends: + if install_recommends == 'no': cmd += " -o APT::Install-Recommends=no" - else: + elif install_recommends == 'yes': cmd += " -o APT::Install-Recommends=yes" rc, out, err = m.run_command(cmd) @@ -549,7 +549,7 @@ def main(): package = dict(default=None, aliases=['pkg', 'name'], type='list'), deb = dict(default=None), default_release = dict(default=None, aliases=['default-release']), - install_recommends = dict(default='yes', aliases=['install-recommends'], type='bool'), + install_recommends = dict(default='default', aliases=['install-recommends'], choices=['default', 'yes', 'no'), force = dict(default='no', type='bool'), upgrade = dict(choices=['no', 'yes', 'safe', 'full', 'dist']), dpkg_options = dict(default=DPKG_OPTIONS) From 06a4efa1cf40daf27cf180a0d07646f4d921d15e Mon Sep 17 00:00:00 2001 From: Harlan Lieberman-Berg Date: Sat, 15 Aug 2015 18:45:08 +0200 Subject: [PATCH 63/86] Add missing brace. 
--- packaging/os/apt.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/os/apt.py b/packaging/os/apt.py index fe8bbfff00a..16c6a5f83b6 100755 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -549,7 +549,7 @@ def main(): package = dict(default=None, aliases=['pkg', 'name'], type='list'), deb = dict(default=None), default_release = dict(default=None, aliases=['default-release']), - install_recommends = dict(default='default', aliases=['install-recommends'], choices=['default', 'yes', 'no'), + install_recommends = dict(default='default', aliases=['install-recommends'], choices=['default', 'yes', 'no']), force = dict(default='no', type='bool'), upgrade = dict(choices=['no', 'yes', 'safe', 'full', 'dist']), dpkg_options = dict(default=DPKG_OPTIONS) From a53cf5434bfbf4ae975bf9dd27f9d5bd2dd19c60 Mon Sep 17 00:00:00 2001 From: Harlan Lieberman-Berg Date: Sat, 15 Aug 2015 20:00:25 +0200 Subject: [PATCH 64/86] Give include_recommends a useless default to make the parser happy. 
--- packaging/os/apt.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/os/apt.py b/packaging/os/apt.py index 16c6a5f83b6..cbf0375e473 100755 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -339,7 +339,7 @@ def expand_pkgspec_from_fnmatches(m, pkgspec, cache): return new_pkgspec def install(m, pkgspec, cache, upgrade=False, default_release=None, - install_recommends, force=False, + install_recommends='default', force=False, dpkg_options=expand_dpkg_options(DPKG_OPTIONS), build_dep=False): pkg_list = [] From 5cacef8617cdf9568134360451cfe8b7b619bbd2 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 2 Nov 2015 13:03:18 -0800 Subject: [PATCH 65/86] Fixes for bcoca's review of #1916 --- packaging/os/apt.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/packaging/os/apt.py b/packaging/os/apt.py index cbf0375e473..b5c363ab1f5 100755 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -64,7 +64,7 @@ options: description: - Corresponds to the C(--no-install-recommends) option for I(apt). C(yes) installs recommended packages. C(no) does not install recommended packages. By default, Ansible will use the same defaults as the operating system. Suggested packages are never installed. 
required: false - default: yes + default: null choices: [ "yes", "no" ] force: description: @@ -339,7 +339,7 @@ def expand_pkgspec_from_fnmatches(m, pkgspec, cache): return new_pkgspec def install(m, pkgspec, cache, upgrade=False, default_release=None, - install_recommends='default', force=False, + install_recommends=None, force=False, dpkg_options=expand_dpkg_options(DPKG_OPTIONS), build_dep=False): pkg_list = [] @@ -385,10 +385,12 @@ def install(m, pkgspec, cache, upgrade=False, default_release=None, if default_release: cmd += " -t '%s'" % (default_release,) - if install_recommends == 'no': + + if install_recommends is False: cmd += " -o APT::Install-Recommends=no" - elif install_recommends == 'yes': + elif install_recommends is True: cmd += " -o APT::Install-Recommends=yes" + # install_recommends is None uses the OS default rc, out, err = m.run_command(cmd) if rc: @@ -549,7 +551,7 @@ def main(): package = dict(default=None, aliases=['pkg', 'name'], type='list'), deb = dict(default=None), default_release = dict(default=None, aliases=['default-release']), - install_recommends = dict(default='default', aliases=['install-recommends'], choices=['default', 'yes', 'no']), + install_recommends = dict(default=None, aliases=['install-recommends'], type='bool'), force = dict(default='no', type='bool'), upgrade = dict(choices=['no', 'yes', 'safe', 'full', 'dist']), dpkg_options = dict(default=DPKG_OPTIONS) From a38e0095f9c4c141adebba54bebbd55bd39d959f Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Mon, 2 Nov 2015 18:54:30 -0500 Subject: [PATCH 66/86] added missing version added --- cloud/amazon/elasticache.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cloud/amazon/elasticache.py b/cloud/amazon/elasticache.py index d3f7ff4cdc7..ba8ed455d12 100644 --- a/cloud/amazon/elasticache.py +++ b/cloud/amazon/elasticache.py @@ -48,6 +48,7 @@ options: - The name of the cache parameter group to associate with this cache cluster. 
If this argument is omitted, the default cache parameter group for the specified engine will be used. required: false default: none + version_added: "2.0" node_type: description: - The compute and memory capacity of the nodes in the cache cluster From e16c5c54fd87a46bbc7019297a25e0bd98dafe5f Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 2 Nov 2015 17:27:20 -0800 Subject: [PATCH 67/86] Revert "Expose cache_parameter_group_name in elasticache module" This PR #1950 implements duplicate functionality to #1353 This reverts commit b04efa22c4403ca869e94e7918721306d23afa8d. Conflicts: cloud/amazon/elasticache.py --- cloud/amazon/elasticache.py | 26 ++++++-------------------- 1 file changed, 6 insertions(+), 20 deletions(-) diff --git a/cloud/amazon/elasticache.py b/cloud/amazon/elasticache.py index ba8ed455d12..d275ba2be82 100644 --- a/cloud/amazon/elasticache.py +++ b/cloud/amazon/elasticache.py @@ -43,12 +43,6 @@ options: - The version number of the cache engine required: false default: none - cache_parameter_group_name: - description: - - The name of the cache parameter group to associate with this cache cluster. If this argument is omitted, the default cache parameter group for the specified engine will be used. - required: false - default: none - version_added: "2.0" node_type: description: - The compute and memory capacity of the nodes in the cache cluster @@ -63,9 +57,9 @@ options: - The port number on which each of the cache nodes will accept connections required: false default: none - parameter_group: + cache_parameter_group: description: - - Specify non-default parameter group names to be associated with cache cluster + - The name of the cache parameter group to associate with this cache cluster. If this argument is omitted, the default cache parameter group for the specified engine will be used. 
required: false default: None version_added: "2.0" @@ -158,12 +152,11 @@ class ElastiCacheManager(object): def __init__(self, module, name, engine, cache_engine_version, node_type, num_nodes, cache_port, parameter_group, cache_subnet_group, cache_security_groups, security_group_ids, zone, wait, - hard_modify, region, cache_parameter_group_name=None, **aws_connect_kwargs): + hard_modify, region, **aws_connect_kwargs): self.module = module self.name = name self.engine = engine self.cache_engine_version = cache_engine_version - self.cache_parameter_group_name = cache_parameter_group_name self.node_type = node_type self.num_nodes = num_nodes self.cache_port = cache_port @@ -224,7 +217,6 @@ class ElastiCacheManager(object): cache_node_type=self.node_type, engine=self.engine, engine_version=self.cache_engine_version, - cache_parameter_group_name=self.cache_parameter_group_name, cache_security_group_names=self.cache_security_groups, security_group_ids=self.security_group_ids, cache_parameter_group_name=self.parameter_group, @@ -306,8 +298,7 @@ class ElastiCacheManager(object): cache_parameter_group_name=self.parameter_group, security_group_ids=self.security_group_ids, apply_immediately=True, - engine_version=self.cache_engine_version, - cache_parameter_group_name=self.cache_parameter_group_name) + engine_version=self.cache_engine_version) except boto.exception.BotoServerError, e: self.module.fail_json(msg=e.message) @@ -493,7 +484,6 @@ def main(): name={'required': True}, engine={'required': False, 'default': 'memcached'}, cache_engine_version={'required': False}, - cache_parameter_group_name={'required': False}, node_type={'required': False, 'default': 'cache.m1.small'}, num_nodes={'required': False, 'default': None, 'type': 'int'}, parameter_group={'required': False, 'default': None}, @@ -522,7 +512,6 @@ def main(): state = module.params['state'] engine = module.params['engine'] cache_engine_version = module.params['cache_engine_version'] - cache_parameter_group_name = 
module.params['cache_parameter_group_name'] node_type = module.params['node_type'] num_nodes = module.params['num_nodes'] cache_port = module.params['cache_port'] @@ -549,16 +538,13 @@ def main(): module.fail_json(msg=str("Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set.")) elasticache_manager = ElastiCacheManager(module, name, engine, - cache_engine_version, - node_type, + cache_engine_version, node_type, num_nodes, cache_port, parameter_group, cache_subnet_group, cache_security_groups, security_group_ids, zone, wait, - hard_modify, region, - cache_parameter_group_name=cache_parameter_group_name, - **aws_connect_kwargs) + hard_modify, region, **aws_connect_kwargs) if state == 'present': elasticache_manager.ensure_present() From 0de2627efc63e0b1f6d24f0bd96d4e5f276ad275 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Mon, 2 Nov 2015 17:33:04 -0800 Subject: [PATCH 68/86] Make cache_parameter_group the name of this new param to match with similar params (leave old name as an alias) --- cloud/amazon/elasticache.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/cloud/amazon/elasticache.py b/cloud/amazon/elasticache.py index d275ba2be82..a22bea70d72 100644 --- a/cloud/amazon/elasticache.py +++ b/cloud/amazon/elasticache.py @@ -63,6 +63,7 @@ options: required: false default: None version_added: "2.0" + aliases: [ 'parameter_group' ] cache_subnet_group: description: - The subnet group name to associate with. Only use if inside a vpc. 
Required if inside a vpc @@ -150,7 +151,7 @@ class ElastiCacheManager(object): EXIST_STATUSES = ['available', 'creating', 'rebooting', 'modifying'] def __init__(self, module, name, engine, cache_engine_version, node_type, - num_nodes, cache_port, parameter_group, cache_subnet_group, + num_nodes, cache_port, cache_parameter_group, cache_subnet_group, cache_security_groups, security_group_ids, zone, wait, hard_modify, region, **aws_connect_kwargs): self.module = module @@ -160,7 +161,7 @@ class ElastiCacheManager(object): self.node_type = node_type self.num_nodes = num_nodes self.cache_port = cache_port - self.parameter_group = parameter_group + self.cache_parameter_group = cache_parameter_group self.cache_subnet_group = cache_subnet_group self.cache_security_groups = cache_security_groups self.security_group_ids = security_group_ids @@ -219,7 +220,7 @@ class ElastiCacheManager(object): engine_version=self.cache_engine_version, cache_security_group_names=self.cache_security_groups, security_group_ids=self.security_group_ids, - cache_parameter_group_name=self.parameter_group, + cache_parameter_group_name=self.cache_parameter_group, cache_subnet_group_name=self.cache_subnet_group, preferred_availability_zone=self.zone, port=self.cache_port) @@ -295,7 +296,7 @@ class ElastiCacheManager(object): num_cache_nodes=self.num_nodes, cache_node_ids_to_remove=nodes_to_remove, cache_security_group_names=self.cache_security_groups, - cache_parameter_group_name=self.parameter_group, + cache_parameter_group_name=self.cache_parameter_group, security_group_ids=self.security_group_ids, apply_immediately=True, engine_version=self.cache_engine_version) @@ -486,7 +487,8 @@ def main(): cache_engine_version={'required': False}, node_type={'required': False, 'default': 'cache.m1.small'}, num_nodes={'required': False, 'default': None, 'type': 'int'}, - parameter_group={'required': False, 'default': None}, + # alias for compat with the original PR 1950 + cache_parameter_group={'required': 
False, 'default': None, 'aliases': ['parameter_group']}, cache_port={'required': False, 'type': 'int'}, cache_subnet_group={'required': False, 'default': None}, cache_security_groups={'required': False, 'default': [default], @@ -521,7 +523,7 @@ def main(): zone = module.params['zone'] wait = module.params['wait'] hard_modify = module.params['hard_modify'] - parameter_group = module.params['parameter_group'] + cache_parameter_group = module.params['cache_parameter_group'] if cache_subnet_group and cache_security_groups == [default]: cache_security_groups = [] @@ -540,7 +542,7 @@ def main(): elasticache_manager = ElastiCacheManager(module, name, engine, cache_engine_version, node_type, num_nodes, cache_port, - parameter_group, + cache_parameter_group, cache_subnet_group, cache_security_groups, security_group_ids, zone, wait, From fa2ea225dddaaf82dfa3800746179112fec67c4f Mon Sep 17 00:00:00 2001 From: Lars Larsson Date: Tue, 3 Nov 2015 11:54:31 +0100 Subject: [PATCH 69/86] total_seconds not present on timedelta on python2.6 --- utilities/logic/wait_for.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/utilities/logic/wait_for.py b/utilities/logic/wait_for.py index 1287d9b6057..e30dec11fc6 100644 --- a/utilities/logic/wait_for.py +++ b/utilities/logic/wait_for.py @@ -103,7 +103,7 @@ options: notes: - The ability to use search_regex with a port connection was added in 1.7. 
requirements: [] -author: +author: - "Jeroen Hoekx (@jhoekx)" - "John Jarvis (@jarv)" - "Andrii Radyk (@AnderEnder)" @@ -127,7 +127,7 @@ EXAMPLES = ''' - wait_for: path=/tmp/foo search_regex=completed # wait until the lock file is removed -- wait_for: path=/var/lock/file.lock state=absent +- wait_for: path=/var/lock/file.lock state=absent # wait until the process is finished and pid was destroyed - wait_for: path=/proc/3466/status state=absent @@ -322,6 +322,11 @@ def _create_connection( (host, port), connect_timeout): connect_socket = socket.create_connection( (host, port), connect_timeout) return connect_socket +def _timedelta_total_seconds(timedelta): + return ( + timedelta.microseconds + 0.0 + + (timedelta.seconds + timedelta.days * 24 * 3600) * 10 ** 6) / 10 ** 6 + def main(): module = AnsibleModule( @@ -432,7 +437,7 @@ def main(): except IOError: pass elif port: - alt_connect_timeout = math.ceil((end - datetime.datetime.now()).total_seconds()) + alt_connect_timeout = math.ceil(_timedelta_total_seconds(end - datetime.datetime.now())) try: s = _create_connection((host, port), min(connect_timeout, alt_connect_timeout)) except: @@ -444,7 +449,7 @@ def main(): data = '' matched = False while datetime.datetime.now() < end: - max_timeout = math.ceil((end - datetime.datetime.now()).total_seconds()) + max_timeout = math.ceil(_timedelta_total_seconds(end - datetime.datetime.now())) (readable, w, e) = select.select([s], [], [], max_timeout) if not readable: # No new data. 
Probably means our timeout From 2a97e9f2997136a13e7df72f8246cd33438087c0 Mon Sep 17 00:00:00 2001 From: Felix Engelmann Date: Sun, 30 Aug 2015 16:24:13 +0200 Subject: [PATCH 70/86] re-implements #226 in optional (editable) way with backward compatibility --- packaging/language/pip.py | 29 ++++++++++++++++++++--------- 1 file changed, 20 insertions(+), 9 deletions(-) mode change 100644 => 100755 packaging/language/pip.py diff --git a/packaging/language/pip.py b/packaging/language/pip.py old mode 100644 new mode 100755 index a4af27ccee5..bdd2b40a1aa --- a/packaging/language/pip.py +++ b/packaging/language/pip.py @@ -90,6 +90,12 @@ options: required: false default: null version_added: "1.0" + editable: + description: + - Pass the editable flag for versioning URLs. + required: false + default: yes + version_added: "2.0" chdir: description: - cd into this directory before running the command @@ -121,6 +127,9 @@ EXAMPLES = ''' # Install (MyApp) using one of the remote protocols (bzr+,hg+,git+,svn+). You do not have to supply '-e' option in extra_args. - pip: name='svn+http://myrepo/svn/MyApp#egg=MyApp' +# Install MyApp using one of the remote protocols (bzr+,hg+,git+) in a non editable way. +- pip: name='git+http://myrepo/app/MyApp' editable=false + # Install (MyApp) from local tarball - pip: name='file:///path/to/MyApp.tar.gz' @@ -239,6 +248,7 @@ def main(): virtualenv_python=dict(default=None, required=False, type='str'), use_mirrors=dict(default='yes', type='bool'), extra_args=dict(default=None, required=False), + editable=dict(default='yes', type='bool', required=False), chdir=dict(default=None, required=False, type='path'), executable=dict(default=None, required=False), ), @@ -312,15 +322,16 @@ def main(): # Automatically apply -e option to extra_args when source is a VCS url. 
VCS # includes those beginning with svn+, git+, hg+ or bzr+ if name: - if name.startswith('svn+') or name.startswith('git+') or \ - name.startswith('hg+') or name.startswith('bzr+'): - args_list = [] # used if extra_args is not used at all - if extra_args: - args_list = extra_args.split(' ') - if '-e' not in args_list: - args_list.append('-e') - # Ok, we will reconstruct the option string - extra_args = ' '.join(args_list) + if module.params['editable']: + if name.startswith('svn+') or name.startswith('git+') or \ + name.startswith('hg+') or name.startswith('bzr+'): + args_list = [] # used if extra_args is not used at all + if extra_args: + args_list = extra_args.split(' ') + if '-e' not in args_list: + args_list.append('-e') + # Ok, we will reconstruct the option string + extra_args = ' '.join(args_list) if extra_args: cmd += ' %s' % extra_args From cac69f3135d6e4934dcf4a143f7e1717f1a82506 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 3 Nov 2015 13:25:46 -0500 Subject: [PATCH 71/86] added missing version_added --- cloud/amazon/ec2_ami.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/cloud/amazon/ec2_ami.py b/cloud/amazon/ec2_ami.py index d7a60211bd3..bdb130e2380 100644 --- a/cloud/amazon/ec2_ami.py +++ b/cloud/amazon/ec2_ami.py @@ -86,8 +86,7 @@ options: - Users and groups that should be able to launch the ami. Expects dictionary with a key of user_ids and/or group_names. user_ids should be a list of account ids. group_name should be a list of groups, "all" is the only acceptable value currently. 
required: false default: null - aliases: [] - + version_added: "2.0" author: "Evan Duffield (@scicoin-project) " extends_documentation_fragment: - aws From adbc430984679ca9469347add900895076c8fedd Mon Sep 17 00:00:00 2001 From: Stewart Rutledge Date: Wed, 4 Nov 2015 14:22:08 +0100 Subject: [PATCH 72/86] Added support for reconfiguring network (moving to another switch, for example) --- cloud/vmware/vsphere_guest.py | 101 ++++++++++++++++++++++++++++++++++ 1 file changed, 101 insertions(+) diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index b8adb7930c3..a14f807e049 100644 --- a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -743,6 +743,9 @@ def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool, cluster_name # set the new RAM size spec.set_element_memoryMB(int(vm_hardware['memory_mb'])) changes['memory'] = vm_hardware['memory_mb'] + # ===( Reconfigure Network )====# + if vm_nic: + changed = reconfigure_net(vsphere_client, vm, module, esxi, resource_pool, guest, vm_nic, cluster_name) # ====( Config Memory )====# if 'num_cpus' in vm_hardware: @@ -814,6 +817,104 @@ def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool, cluster_name module.exit_json(changed=False) +def reconfigure_net(vsphere_client, vm, module, esxi, resource_pool, guest, vm_nic, cluster_name=None): + s = vsphere_client + nics = {} + request = VI.ReconfigVM_TaskRequestMsg() + _this = request.new__this(vm._mor) + _this.set_attribute_type(vm._mor.get_attribute_type()) + request.set_element__this(_this) + nic_changes = [] + datacenter = esxi['datacenter'] + # Datacenter managed object reference + dclist = [k for k, + v in vsphere_client.get_datacenters().items() if v == datacenter] + if dclist: + dcmor=dclist[0] + else: + vsphere_client.disconnect() + module.fail_json(msg="Cannot find datacenter named: %s" % datacenter) + dcprops = VIProperty(vsphere_client, dcmor) + nfmor = dcprops.networkFolder._obj + for k,v in 
vm_nic.iteritems(): + nicNum = k[len(k) -1] + if vm_nic[k]['network_type'] == 'dvs': + portgroupKey = find_portgroup_key(module, s, nfmor, vm_nic[k]['network']) + todvs = True + elif vm_nic[k]['network_type'] == 'standard': + todvs = False + # Detect cards that need to be changed and network type (and act accordingly) + for dev in vm.properties.config.hardware.device: + if dev._type in ["VirtualE1000", "VirtualE1000e", + "VirtualPCNet32", "VirtualVmxnet", + "VirtualNmxnet2", "VirtualVmxnet3"]: + devNum = dev.deviceInfo.label[len(dev.deviceInfo.label) - 1] + if devNum == nicNum: + fromdvs = dev.deviceInfo.summary.split(':')[0] == 'DVSwitch' + if todvs and fromdvs: + if dev.backing.port._obj.get_element_portgroupKey() != portgroupKey: + nics[k] = (dev, portgroupKey, 1) + elif fromdvs and not todvs: + nics[k] = (dev, '', 2) + elif not fromdvs and todvs: + nics[k] = (dev, portgroupKey, 3) + elif not fromdvs and not todvs: + if dev.backing._obj.get_element_deviceName() != vm_nic[k]['network']: + nics[k] = (dev, '', 2) + else: + pass + else: + module.exit_json() + + if len(nics) > 0: + for nic, obj in nics.iteritems(): + """ + 1,2 and 3 are used to mark which action should be taken + 1 = from a distributed switch to a distributed switch + 2 = to a standard switch + 3 = to a distributed switch + """ + dev = obj[0] + pgKey = obj[1] + dvsKey = obj[2] + if dvsKey == 1: + dev.backing.port._obj.set_element_portgroupKey(pgKey) + dev.backing.port._obj.set_element_portKey('') + if dvsKey == 3: + dvswitch_uuid = find_dvswitch_uuid(module, s, nfmor, pgKey) + nic_backing_port = VI.ns0.DistributedVirtualSwitchPortConnection_Def( + "nic_backing_port").pyclass() + nic_backing_port.set_element_switchUuid(dvswitch_uuid) + nic_backing_port.set_element_portgroupKey(pgKey) + nic_backing_port.set_element_portKey('') + nic_backing = VI.ns0.VirtualEthernetCardDistributedVirtualPortBackingInfo_Def( + "nic_backing").pyclass() + nic_backing.set_element_port(nic_backing_port) + 
dev._obj.set_element_backing(nic_backing) + if dvsKey == 2: + nic_backing = VI.ns0.VirtualEthernetCardNetworkBackingInfo_Def( + "nic_backing").pyclass() + nic_backing.set_element_deviceName(vm_nic[nic]['network']) + dev._obj.set_element_backing(nic_backing) + for nic, obj in nics.iteritems(): + dev = obj[0] + spec = request.new_spec() + nic_change = spec.new_deviceChange() + nic_change.set_element_device(dev._obj) + nic_change.set_element_operation("edit") + nic_changes.append(nic_change) + spec.set_element_deviceChange(nic_changes) + request.set_element_spec(spec) + ret = vsphere_client._proxy.ReconfigVM_Task(request)._returnval + task = VITask(ret, vsphere_client) + status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR]) + if status == task.STATE_SUCCESS: + return(True) + elif status == task.STATE_ERROR: + module.fail_json(msg="Could not change network %s" % task.get_error_message()) + elif len(nics) == 0: + return(False) + def create_vm(vsphere_client, module, esxi, resource_pool, cluster_name, guest, vm_extra_config, vm_hardware, vm_disk, vm_nic, vm_hw_version, state): datacenter = esxi['datacenter'] From 3f5d6df5f717d80e8a59fe66ca6be79e491ec80a Mon Sep 17 00:00:00 2001 From: nitzmahone Date: Wed, 4 Nov 2015 17:29:08 -0800 Subject: [PATCH 73/86] fixed ansible_totalmem fact returning 0 Win32_PhysicalMemory CIM object is busted on some virtual environments, switched to Win32_ComputerSystem.TotalPhysicalMemory --- windows/setup.ps1 | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/windows/setup.ps1 b/windows/setup.ps1 index 0b3e2c897e3..4d163c7ec26 100644 --- a/windows/setup.ps1 +++ b/windows/setup.ps1 @@ -26,11 +26,9 @@ $result = New-Object psobject @{ }; $win32_os = Get-CimInstance Win32_OperatingSystem +$win32_cs = Get-CimInstance Win32_ComputerSystem $osversion = [Environment]::OSVersion -$memory = @() -$memory += Get-WmiObject win32_Physicalmemory -$capacity = 0 -$memory | foreach {$capacity += $_.Capacity} +$capacity = 
$win32_cs.TotalPhysicalMemory # Win32_PhysicalMemory is empty on some virtual platforms $netcfg = Get-WmiObject win32_NetworkAdapterConfiguration $ActiveNetcfg = @(); $ActiveNetcfg+= $netcfg | where {$_.ipaddress -ne $null} From 77d3678acfa3a19517aeb45214118aa8b74637a9 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 6 Nov 2015 09:31:20 -0800 Subject: [PATCH 74/86] Fix escaping of newline carriage return characters in the documentation --- windows/win_template.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/windows/win_template.py b/windows/win_template.py index e8323362dd6..4ffcaafe2c7 100644 --- a/windows/win_template.py +++ b/windows/win_template.py @@ -15,7 +15,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: win_template version_added: "1.9.2" @@ -47,8 +47,8 @@ notes: - "templates are loaded with C(trim_blocks=True)." - By default, windows line endings are not created in the generated file. - "In order to ensure windows line endings are in the generated file, add the following header - as the first line of your template: #jinja2: newline_sequence:'\r\n' and ensure each line - of the template ends with \r\n" + as the first line of your template: #jinja2: newline_sequence:'\\\\r\\\\n' and ensure each line + of the template ends with \\\\r\\\\n" - Beware fetching files from windows machines when creating templates because certain tools, such as Powershell ISE, and regedit's export facility add a Byte Order Mark as the first character of the file, which can cause tracebacks. 
From 4ed7b690f685f87934fdb304b302f96de9f09139 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 6 Nov 2015 10:53:33 -0800 Subject: [PATCH 75/86] Add a new contributor as a maintainer of the docker module --- cloud/docker/docker.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index 2b2e3ea9b4b..befe3bd0510 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -331,6 +331,7 @@ author: - "Joshua Conner (@joshuaconner)" - "Pavel Antonov (@softzilla)" - "Ash Wilson (@smashwilson)" + - "Thomas Steinbach (@ThomasSteinbach)" requirements: - "python >= 2.6" - "docker-py >= 0.3.0" From f2943bd4045a32e2c4967fbc87f4385b86cd1d79 Mon Sep 17 00:00:00 2001 From: Toshio Kuratomi Date: Fri, 6 Nov 2015 21:18:46 -0800 Subject: [PATCH 76/86] Add zfil as an owner of the docker module --- cloud/docker/docker.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py index befe3bd0510..0ecfb93b0c2 100644 --- a/cloud/docker/docker.py +++ b/cloud/docker/docker.py @@ -332,6 +332,7 @@ author: - "Pavel Antonov (@softzilla)" - "Ash Wilson (@smashwilson)" - "Thomas Steinbach (@ThomasSteinbach)" + - "Philippe Jandot (@zfil)" requirements: - "python >= 2.6" - "docker-py >= 0.3.0" From e74dc8c1ddc0fb6de51797c2c23881a109d6930a Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Sat, 7 Nov 2015 08:23:35 -0500 Subject: [PATCH 77/86] minor doc fixes --- files/ini_file.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/files/ini_file.py b/files/ini_file.py index d837c329d4b..ce286741981 100644 --- a/files/ini_file.py +++ b/files/ini_file.py @@ -29,8 +29,7 @@ description: - Manage (add, remove, change) individual settings in an INI-style file without having to manage the file as a whole with, say, M(template) or M(assemble). Adds missing sections if they don't exist. 
- - Comments are discarded when the source file is read, and therefore will not - show up in the destination file. + - Before version 2.0, comments are discarded when the source file is read, and therefore will not show up in the destination file. version_added: "0.9" options: dest: @@ -80,7 +79,9 @@ notes: Either use M(template) to create a base INI file with a C([default]) section, or use M(lineinfile) to add the missing line. requirements: [ ConfigParser ] -author: "Jan-Piet Mens (@jpmens), Ales Nosek" +author: + - "Jan-Piet Mens (@jpmens)" + - "Ales Nosek (@noseka1)" ''' EXAMPLES = ''' From ca12ed5d98eec5c4ec210c11c34e7efa31405693 Mon Sep 17 00:00:00 2001 From: Timothy Appnel Date: Sat, 7 Nov 2015 18:16:20 -0500 Subject: [PATCH 78/86] Added the checksum_algo alias to the stats module. --- files/stat.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/files/stat.py b/files/stat.py index 61c77a2ef31..852ddd5afd2 100644 --- a/files/stat.py +++ b/files/stat.py @@ -53,6 +53,7 @@ options: required: false choices: [ 'sha1', 'sha224', 'sha256', 'sha384', 'sha512' ] default: sha1 + aliases: [ 'checksum_algo' ] version_added: "2.0" author: "Bruce Pennypacker (@bpennypacker)" ''' @@ -292,7 +293,7 @@ def main(): follow = dict(default='no', type='bool'), get_md5 = dict(default='yes', type='bool'), get_checksum = dict(default='yes', type='bool'), - checksum_algorithm = dict(default='sha1', type='str', choices=['sha1', 'sha224', 'sha256', 'sha384', 'sha512']) + checksum_algorithm = dict(default='sha1', type='str', choices=['sha1', 'sha224', 'sha256', 'sha384', 'sha512'], aliases=['checksum_algo']) ), supports_check_mode = True ) @@ -381,4 +382,4 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() \ No newline at end of file +main() From ccb39767cf7841682d4f89b87030f2d4761b5751 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Mon, 9 Nov 2015 10:39:56 -0500 Subject: [PATCH 79/86] Use add_ips_to_server API This module 
is still using an old pre-release API and needs to update to use the current API. Co-Authored-By: Marton Kiss --- cloud/openstack/os_floating_ip.py | 25 +++++++------------------ 1 file changed, 7 insertions(+), 18 deletions(-) diff --git a/cloud/openstack/os_floating_ip.py b/cloud/openstack/os_floating_ip.py index 10827012ae8..957e3057375 100644 --- a/cloud/openstack/os_floating_ip.py +++ b/cloud/openstack/os_floating_ip.py @@ -122,10 +122,10 @@ def main(): argument_spec = openstack_full_argument_spec( server=dict(required=True), state=dict(default='present', choices=['absent', 'present']), - network=dict(required=False), - floating_ip_address=dict(required=False), + network=dict(required=False, default=None), + floating_ip_address=dict(required=False, default=None), reuse=dict(required=False, type='bool', default=False), - fixed_address=dict(required=False), + fixed_address=dict(required=False, default=None), wait=dict(required=False, type='bool', default=False), timeout=dict(required=False, type='int', default=60), ) @@ -154,23 +154,12 @@ def main(): msg="server {0} not found".format(server_name_or_id)) if state == 'present': - if floating_ip_address is None: - if reuse: - f_ip = cloud.available_floating_ip(network=network) - else: - f_ip = cloud.create_floating_ip(network=network) - else: - f_ip = _get_floating_ip(cloud, floating_ip_address) - if f_ip is None: - module.fail_json( - msg="floating IP {0} not found".format( - floating_ip_address)) - - cloud.attach_ip_to_server( - server_id=server['id'], floating_ip_id=f_ip['id'], + cloud.add_ips_to_server( + server=server, ips=floating_ip_address, reuse=reuse, fixed_address=fixed_address, wait=wait, timeout=timeout) + fip_address = cloud.get_server_public_ip(server) # Update the floating IP status - f_ip = cloud.get_floating_ip(id=f_ip['id']) + f_ip = _get_floating_ip(cloud, fip_address) module.exit_json(changed=True, floating_ip=f_ip) elif state == 'absent': From 6584b59d91d096406453b8cfc816730bbba0267e Mon Sep 
17 00:00:00 2001 From: Jordi De Groof Date: Wed, 11 Nov 2015 20:23:24 +0100 Subject: [PATCH 80/86] Update facts when hostname is changed ansible_hostname contains the unqualified hostname --- system/hostname.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/system/hostname.py b/system/hostname.py index 2914088691a..0d4ca085b83 100644 --- a/system/hostname.py +++ b/system/hostname.py @@ -42,6 +42,7 @@ EXAMPLES = ''' - hostname: name=web01 ''' +import socket from distutils.version import LooseVersion # import module snippets @@ -563,6 +564,10 @@ def main(): hostname.set_permanent_hostname(name) changed = True - module.exit_json(changed=changed, name=name, ansible_facts=dict(ansible_hostname=name)) + module.exit_json(changed=changed, name=name, + ansible_facts=dict(ansible_hostname=name.split('.')[0], + ansible_nodename=name, + ansible_fqdn=socket.getfqdn(), + ansible_domain='.'.join(socket.getfqdn().split('.')[1:]))) main() From 889274a5256fb2f60667d2964e233141eeaa14fd Mon Sep 17 00:00:00 2001 From: J Levitt Date: Wed, 11 Nov 2015 15:15:30 -0600 Subject: [PATCH 81/86] Add rds restore example to list of examples There was no db restore example. I've provided one that shows how to do the restore, then add a security group (you cannot add the security group during the restore step -- it has to be done in a modify step afterward). Also, I show how to get the endpoint. --- cloud/amazon/rds.py | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/cloud/amazon/rds.py b/cloud/amazon/rds.py index 1eb4cc8ab1d..d8f5a2cea86 100644 --- a/cloud/amazon/rds.py +++ b/cloud/amazon/rds.py @@ -271,6 +271,33 @@ EXAMPLES = ''' command: reboot instance_name: database wait: yes + +# Restore a Postgres db instance from a snapshot, wait for it to become available again, and +# then modify it to add your security group. Also, display the new endpoint. 
+# Note that the "publicly_accessible" option is allowed here just as it is in the AWS CLI +- local_action: + module: rds + command: restore + snapshot: mypostgres-snapshot + instance_name: MyNewInstanceName + region: us-west-2 + zone: us-west-2b + subnet: default-vpc-xx441xxx + publicly_accessible: yes + wait: yes + wait_timeout: 600 + tags: + Name: pg1_test_name_tag + register: rds + +- local_action: + module: rds + command: modify + instance_name: MyNewInstanceName + region: us-west-2 + vpc_security_groups: sg-xxx945xx + +- debug: msg="The new db endpoint is {{ rds.instance.endpoint }}" ''' From 6e37f1dcef0e38ea6b9222cf49aa66df0e3a3c45 Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 12 Nov 2015 09:39:37 -0800 Subject: [PATCH 82/86] fixed remote_src support, now actually copies and does not move --- files/copy.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/files/copy.py b/files/copy.py index da976f9a692..812b2d9ab7d 100644 --- a/files/copy.py +++ b/files/copy.py @@ -19,7 +19,7 @@ # along with Ansible. If not, see . 
import os -import time +import tempfile DOCUMENTATION = ''' --- @@ -214,7 +214,8 @@ def main(): backup = dict(default=False, type='bool'), force = dict(default=True, aliases=['thirsty'], type='bool'), validate = dict(required=False, type='str'), - directory_mode = dict(required=False) + directory_mode = dict(required=False), + remote_src = dict(required=False, type='bool'), ), add_file_common_args=True, supports_check_mode=True, @@ -228,6 +229,7 @@ def main(): validate = module.params.get('validate',None) follow = module.params['follow'] mode = module.params['mode'] + remote_src = module.params['remote_src'] if not os.path.exists(src): module.fail_json(msg="Source %s failed to transfer" % (src)) @@ -307,7 +309,12 @@ def main(): (rc,out,err) = module.run_command(validate % src) if rc != 0: module.fail_json(msg="failed to validate: rc:%s error:%s" % (rc,err)) - module.atomic_move(src, dest) + if remote_src: + tmpdest = tempfile.mkstemp(dir=os.basedir(dest)) + shutil.copy2(src, tmpdest) + module.atomic_move(tmpdest, dest) + else: + module.atomic_move(src, dest) except IOError: module.fail_json(msg="failed to copy: %s to %s" % (src, dest)) changed = True From 3193961cf5d7088cee6716f93fd642b763995fd9 Mon Sep 17 00:00:00 2001 From: Marcin Stolarek Date: Fri, 13 Nov 2015 10:45:27 +0100 Subject: [PATCH 83/86] It may be string with int comparison, if ansible user specifies identifier as int --- cloud/amazon/route53.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/amazon/route53.py b/cloud/amazon/route53.py index 9b867fb1e72..443b71be921 100644 --- a/cloud/amazon/route53.py +++ b/cloud/amazon/route53.py @@ -389,7 +389,7 @@ def main(): decoded_name = rset.name.replace(r'\052', '*') decoded_name = decoded_name.replace(r'\100', '@') - if rset.type == type_in and decoded_name.lower() == record_in.lower() and rset.identifier == identifier_in: + if rset.type == type_in and decoded_name.lower() == record_in.lower() and rset.identifier == 
str(identifier_in): found_record = True record['zone'] = zone_in record['type'] = rset.type From b6223ca729b0f4fb238eb30a31447c6a55fbca95 Mon Sep 17 00:00:00 2001 From: Marcin Stolarek Date: Fri, 13 Nov 2015 11:32:10 +0100 Subject: [PATCH 84/86] Save changes of special characters to rset, without that comparison rset.to_xml() == wanted_rset.to_xml() will fail if record contains * or @ characters. --- cloud/amazon/route53.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cloud/amazon/route53.py b/cloud/amazon/route53.py index 443b71be921..ec4dc533005 100644 --- a/cloud/amazon/route53.py +++ b/cloud/amazon/route53.py @@ -388,6 +388,8 @@ def main(): # tripping of things like * and @. decoded_name = rset.name.replace(r'\052', '*') decoded_name = decoded_name.replace(r'\100', '@') + #Need to save this changes in rset, because of comparing rset.to_xml() == wanted_rset.to_xml() in next block + rset.name = decoded_name if rset.type == type_in and decoded_name.lower() == record_in.lower() and rset.identifier == str(identifier_in): found_record = True From 572771d0b1eb6d94ea9a596b7a719d3a2d0b651b Mon Sep 17 00:00:00 2001 From: James Cammarata Date: Fri, 13 Nov 2015 16:46:32 -0500 Subject: [PATCH 85/86] Version bump for new beta 2.0.0-0.5.beta3 --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index f802f1a2cdb..47c909bbc53 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.0.0-0.4.beta2 +2.0.0-0.5.beta3 From 21f6390fa34ca0e0a4736f4f2803b22356953d0f Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Tue, 17 Nov 2015 10:05:15 -0800 Subject: [PATCH 86/86] clarified set_fact function --- utilities/logic/set_fact.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/utilities/logic/set_fact.py b/utilities/logic/set_fact.py index f05dbf76795..3dc729d07dc 100644 --- a/utilities/logic/set_fact.py +++ b/utilities/logic/set_fact.py @@ -24,9 +24,8 @@ author: "Dag Wieers (@dagwieers)" module: set_fact short_description: 
Set host facts from a task description: - - This module allows setting new variables. Variables are set on a host-by-host basis - just like facts discovered by the setup module. - - These variables will survive between plays. + - This module allows setting new variables. Variables are set on a host-by-host basis just like facts discovered by the setup module. + - These variables will survive between plays during an Ansible run, but will not be saved across executions even if you use a fact cache. options: key_value: description: