diff --git a/README.md b/README.md
index 0535e4a302b..09a35854879 100644
--- a/README.md
+++ b/README.md
@@ -7,7 +7,22 @@ New module submissions for modules that do not yet exist should be submitted to
 
 Take care to submit tickets to the appropriate repo where modules are contained. The docs.ansible.com website indicates this at the bottom of each module documentation page.
 
+Reporting bugs
+==============
+
+Take care to submit tickets to the appropriate repo where modules are contained. The repo is mentioned at the bottom of each module documentation page at [docs.ansible.com](http://docs.ansible.com/).
+
+Testing modules
+===============
+
+The Ansible [module development guide](http://docs.ansible.com/developing_modules.html#testing-modules) contains the latest guidance on testing modules.
+
 License
 =======
 
-As with Ansible, modules distributed with Ansible are GPLv3 licensed. User generated modules not part of this project can be of any license.
+As with Ansible, modules distributed with Ansible are GPLv3 licensed. User-generated modules not part of this project can be of any license.
+
+Installation
+============
+
+There is no need to install this repo separately: it is included in any Ansible installation performed using the officially documented methods.
diff --git a/cloud/amazon/cloudformation.py b/cloud/amazon/cloudformation.py
index 1c8a9d6aca5..b382e3f05ff 100644
--- a/cloud/amazon/cloudformation.py
+++ b/cloud/amazon/cloudformation.py
@@ -130,13 +130,6 @@ except ImportError:
     sys.exit(1)
 
 
-class Region:
-    def __init__(self, region):
-        '''connects boto to the region specified in the cloudformation template'''
-        self.name = region
-        self.endpoint = 'cloudformation.%s.amazonaws.com' % region
-
-
 def boto_exception(err):
     '''generic error message handler'''
     if hasattr(err, 'error_message'):
@@ -239,11 +232,10 @@ def main():
     stack_outputs = {}
 
     try:
-        cf_region = Region(region)
-        cfn = boto.cloudformation.connection.CloudFormationConnection(
-            aws_access_key_id=aws_access_key,
+        cfn = boto.cloudformation.connect_to_region(
+            region,
+            aws_access_key_id=aws_access_key,
             aws_secret_access_key=aws_secret_key,
-            region=cf_region,
         )
     except boto.exception.NoAuthHandlerFound, e:
         module.fail_json(msg=str(e))
diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py
old mode 100644
new mode 100755
index fc4ec64b8a4..10791439556
--- a/cloud/amazon/ec2.py
+++ b/cloud/amazon/ec2.py
@@ -61,6 +61,13 @@ options:
     required: true
    default: null
    aliases: []
+  tenancy:
+    version_added: "1.9"
+    description:
+      - An instance with a tenancy of "dedicated" runs on single-tenant hardware and can only be launched into a VPC. Valid values are "default" or "dedicated". Note that to use dedicated tenancy you MUST specify a vpc_subnet_id as well. Dedicated tenancy is not available for EC2 "micro" instances.
+ required: false + default: default + aliases: [] spot_price: version_added: "1.5" description: @@ -299,6 +306,18 @@ EXAMPLES = ''' vpc_subnet_id: subnet-29e63245 assign_public_ip: yes +# Dedicated tenancy example +- local_action: + module: ec2 + assign_public_ip: yes + group_id: sg-1dc53f72 + key_name: mykey + image: ami-6e649707 + instance_type: m1.small + tenancy: dedicated + vpc_subnet_id: subnet-29e63245 + wait: yes + # Spot instance example - ec2: spot_price: 0.24 @@ -476,6 +495,7 @@ try: import boto.ec2 from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping from boto.exception import EC2ResponseError + from boto.vpc import VPCConnection except ImportError: print "failed=True msg='boto required for this module'" sys.exit(1) @@ -571,7 +591,10 @@ def get_instance_info(inst): 'root_device_type': inst.root_device_type, 'root_device_name': inst.root_device_name, 'state': inst.state, - 'hypervisor': inst.hypervisor} + 'hypervisor': inst.hypervisor, + 'tags': inst.tags, + 'groups': dict((group.id, group.name) for group in inst.groups), + } try: instance_info['virtualization_type'] = getattr(inst,'virtualization_type') except AttributeError: @@ -582,6 +605,11 @@ def get_instance_info(inst): except AttributeError: instance_info['ebs_optimized'] = False + try: + instance_info['tenancy'] = getattr(inst, 'placement_tenancy') + except AttributeError: + instance_info['tenancy'] = 'default' + return instance_info def boto_supports_associate_public_ip_address(ec2): @@ -651,7 +679,7 @@ def boto_supports_param_in_spot_request(ec2, param): method = getattr(ec2, 'request_spot_instances') return param in method.func_code.co_varnames -def enforce_count(module, ec2): +def enforce_count(module, ec2, vpc): exact_count = module.params.get('exact_count') count_tag = module.params.get('count_tag') @@ -676,7 +704,7 @@ def enforce_count(module, ec2): to_create = exact_count - len(instances) if not checkmode: (instance_dict_array, changed_instance_ids, changed) \ - = create_instances(module, ec2, override_count=to_create) + = create_instances(module, ec2, vpc, override_count=to_create) for inst in instance_dict_array: instances.append(inst) @@ -707,7 +735,7 @@ def enforce_count(module, ec2): return (all_instances, instance_dict_array, changed_instance_ids, changed) -def create_instances(module, ec2, override_count=None): +def create_instances(module, ec2, vpc, override_count=None): """ Creates new instances @@ -725,6 +753,7 @@ def create_instances(module, ec2, override_count=None): group_id = module.params.get('group_id') zone = module.params.get('zone') instance_type = module.params.get('instance_type') + tenancy = module.params.get('tenancy') spot_price = module.params.get('spot_price') image = module.params.get('image') if override_count: @@ -755,25 +784,29 @@ def create_instances(module, ec2, override_count=None): module.fail_json(msg = str("Use only one type of parameter (group_name) or (group_id)")) sys.exit(1) + vpc_id = None + if vpc_subnet_id: + vpc_id = vpc.get_all_subnets(subnet_ids=[vpc_subnet_id])[0].vpc_id + else: + vpc_id = None + try: # Here we try to lookup the group id from the security group name - if group is set. 
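+        # Security group names are only unique within a VPC, so when the
+        # requested subnet implies a VPC the lookup below is scoped to that
+        # VPC's security groups.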
         if group_name:
-            grp_details = ec2.get_all_security_groups()
-            if type(group_name) == list:
-                group_id = [ str(grp.id) for grp in grp_details if str(grp.name) in group_name ]
-            elif type(group_name) == str:
-                for grp in grp_details:
-                    if str(group_name) in str(grp):
-                        group_id = [str(grp.id)]
+            if vpc_id:
+                grp_details = ec2.get_all_security_groups(filters={'vpc_id': vpc_id})
+            else:
+                grp_details = ec2.get_all_security_groups()
+            if isinstance(group_name, basestring):
                 group_name = [group_name]
+            group_id = [ str(grp.id) for grp in grp_details if str(grp.name) in group_name ]
         # Now we try to lookup the group id testing if group exists.
         elif group_id:
             #wrap the group_id in a list if it's not one already
-            if type(group_id) == str:
+            if isinstance(group_id, basestring):
                 group_id = [group_id]
             grp_details = ec2.get_all_security_groups(group_ids=group_id)
-            grp_item = grp_details[0]
-            group_name = [grp_item.name]
+            group_name = [grp_item.name for grp_item in grp_details]
     except boto.exception.NoAuthHandlerFound, e:
             module.fail_json(msg = str(e))
@@ -808,6 +841,10 @@ def create_instances(module, ec2, override_count=None):
 
         if ebs_optimized:
             params['ebs_optimized'] = ebs_optimized
+
+        # 'tenancy' always has a default value, but it is not a valid parameter for a spot instance request
+        if not spot_price:
+            params['tenancy'] = tenancy
 
         if boto_supports_profile_name_arg(ec2):
             params['instance_profile_name'] = instance_profile_name
@@ -887,6 +924,18 @@ def create_instances(module, ec2, override_count=None):
                     continue
             else:
                 module.fail_json(msg = str(e))
+
+        # The instances returned through ec2.run_instances above can be in
+        # terminated state due to idempotency. See commit 7f11c3d for a complete
+        # explanation.
+        terminated_instances = [
+            str(instance.id) for instance in res.instances if instance.state == 'terminated'
+        ]
+        if terminated_instances:
+            module.fail_json(msg = "Instances with id(s) %s " % terminated_instances +
+                             "were created previously but have since been terminated - " +
+                             "use a (possibly different) 'instanceid' parameter")
+
     else:
         if private_ip:
             module.fail_json(
@@ -924,15 +973,6 @@ def create_instances(module, ec2, override_count=None):
         except boto.exception.BotoServerError, e:
             module.fail_json(msg = "Instance creation failed => %s: %s" % (e.error_code, e.error_message))
 
-        # The instances returned through run_instances can be in
-        # terminated state due to idempotency.
-        terminated_instances = [ str(instance.id) for instance in res.instances
-            if instance.state == 'terminated' ]
-        if terminated_instances:
-            module.fail_json(msg = "Instances with id(s) %s " % terminated_instances +
-                             "were created previously but have since been terminated - " +
-                             "use a (possibly different) 'instanceid' parameter")
-
     # wait here until the instances are up
     num_running = 0
     wait_timeout = time.time() + wait_timeout
@@ -1150,6 +1190,7 @@ def main():
             count_tag = dict(),
             volumes = dict(type='list'),
             ebs_optimized = dict(type='bool', default=False),
+            tenancy = dict(default='default'),
         )
     )
@@ -1164,6 +1205,20 @@ def main():
 
     ec2 = ec2_connect(module)
 
+    ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module)
+
+    if region:
+        try:
+            vpc = boto.vpc.connect_to_region(
+                region,
+                aws_access_key_id=aws_access_key,
+                aws_secret_access_key=aws_secret_key
+            )
+        except boto.exception.NoAuthHandlerFound, e:
+            module.fail_json(msg = str(e))
+    else:
+        module.fail_json(msg="region must be specified")
+
     tagged_instances = []
 
     state = module.params.get('state')
@@ -1188,9 +1243,9 @@ def main():
             module.fail_json(msg='image parameter is required for new instance')
 
         if module.params.get('exact_count') is None:
-            (instance_dict_array, new_instance_ids, changed) = create_instances(module, ec2)
+            (instance_dict_array, new_instance_ids, changed) = create_instances(module, ec2, vpc)
         else:
-            (tagged_instances, instance_dict_array, new_instance_ids, changed) = enforce_count(module, ec2)
+            (tagged_instances, instance_dict_array, new_instance_ids, changed) = enforce_count(module, ec2, vpc)
 
     module.exit_json(changed=changed, instance_ids=new_instance_ids, instances=instance_dict_array, tagged_instances=tagged_instances)
diff --git a/cloud/amazon/ec2_ami.py b/cloud/amazon/ec2_ami.py
index ab1f986356b..401b667c545 100644
--- a/cloud/amazon/ec2_ami.py
+++ b/cloud/amazon/ec2_ami.py
@@ -242,7 +242,10 @@ def main():
     )
     module = AnsibleModule(argument_spec=argument_spec)
 
-    ec2 = ec2_connect(module)
+    try:
+        ec2 = ec2_connect(module)
+    except Exception, e:
+        module.fail_json(msg="Error while connecting to aws: %s" % str(e))
 
     if module.params.get('state') == 'absent':
         if not module.params.get('image_id'):
diff --git a/cloud/amazon/ec2_ami_search.py b/cloud/amazon/ec2_ami_search.py
index 70664cf5f8d..1dd5f056e96 100644
--- a/cloud/amazon/ec2_ami_search.py
+++ b/cloud/amazon/ec2_ami_search.py
@@ -57,7 +57,8 @@ options:
     required: false
     default: us-east-1
     choices: ["ap-northeast-1", "ap-southeast-1", "ap-southeast-2",
-              "eu-west-1", "sa-east-1", "us-east-1", "us-west-1", "us-west-2", "us-gov-west-1"]
+              "eu-central-1", "eu-west-1", "sa-east-1", "us-east-1",
+              "us-west-1", "us-west-2", "us-gov-west-1"]
   virt:
     description: virtualization type
     required: false
@@ -89,11 +90,13 @@ SUPPORTED_DISTROS = ['ubuntu']
 
 AWS_REGIONS = ['ap-northeast-1',
                'ap-southeast-1',
                'ap-southeast-2',
+               'eu-central-1',
                'eu-west-1',
                'sa-east-1',
                'us-east-1',
                'us-west-1',
-               'us-west-2']
+               'us-west-2',
+               'us-gov-west-1']
 
 
 def get_url(module, url):
diff --git a/cloud/amazon/ec2_asg.py b/cloud/amazon/ec2_asg.py
index c5de2fac07f..6e5d3508cb8 100644
--- a/cloud/amazon/ec2_asg.py
+++ b/cloud/amazon/ec2_asg.py
@@ -274,7 +274,7 @@ def create_autoscaling_group(connection, module):
         region, ec2_url, aws_connect_params = get_aws_connection_info(module)
         try:
             ec2_connection = connect_to_aws(boto.ec2, region, **aws_connect_params)
-        except boto.exception.NoAuthHandlerFound, e:
+        except (boto.exception.NoAuthHandlerFound, StandardError), e:
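+            # connect_to_aws raises StandardError when the requested region is
+            # not known to the installed boto release, so catch it here as well
+            # and report it the same way as an authentication failure.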
module.fail_json(msg=str(e)) elif vpc_zone_identifier: vpc_zone_identifier = ','.join(vpc_zone_identifier) @@ -326,6 +326,8 @@ def create_autoscaling_group(connection, module): for attr in ASG_ATTRIBUTES: if module.params.get(attr): module_attr = module.params.get(attr) + if attr == 'vpc_zone_identifier': + module_attr = ','.join(module_attr) group_attr = getattr(as_group, attr) # we do this because AWS and the module may return the same list # sorted differently diff --git a/cloud/amazon/ec2_elb.py b/cloud/amazon/ec2_elb.py index 52f8c4a4bf9..11abd827b2b 100644 --- a/cloud/amazon/ec2_elb.py +++ b/cloud/amazon/ec2_elb.py @@ -258,7 +258,7 @@ class ElbManager: try: elb = connect_to_aws(boto.ec2.elb, self.region, **self.aws_connect_params) - except boto.exception.NoAuthHandlerFound, e: + except (boto.exception.NoAuthHandlerFound, StandardError), e: self.module.fail_json(msg=str(e)) elbs = elb.get_all_load_balancers() @@ -278,7 +278,7 @@ class ElbManager: try: ec2 = connect_to_aws(boto.ec2, self.region, **self.aws_connect_params) - except boto.exception.NoAuthHandlerFound, e: + except (boto.exception.NoAuthHandlerFound, StandardError), e: self.module.fail_json(msg=str(e)) return ec2.get_only_instances(instance_ids=[self.instance_id])[0] diff --git a/cloud/amazon/ec2_elb_lb.py b/cloud/amazon/ec2_elb_lb.py index 1ebccf73cdf..19d530c2a0f 100644 --- a/cloud/amazon/ec2_elb_lb.py +++ b/cloud/amazon/ec2_elb_lb.py @@ -328,7 +328,9 @@ class ElbManager(object): 'security_group_ids': check_elb.security_groups, 'status': self.status, 'subnets': self.subnets, - 'scheme': check_elb.scheme + 'scheme': check_elb.scheme, + 'hosted_zone_name': check_elb.canonical_hosted_zone_name, + 'hosted_zone_id': check_elb.canonical_hosted_zone_name_id } if check_elb.health_check: @@ -341,7 +343,7 @@ class ElbManager(object): } if check_elb.listeners: - info['listeners'] = [l.get_complex_tuple() + info['listeners'] = [self._api_listener_as_tuple(l) for l in check_elb.listeners] elif self.status == 'created': # When creating a new ELB, listeners don't show in the @@ -375,7 +377,7 @@ class ElbManager(object): try: return connect_to_aws(boto.ec2.elb, self.region, **self.aws_connect_params) - except boto.exception.NoAuthHandlerFound, e: + except (boto.exception.NoAuthHandlerFound, StandardError), e: self.module.fail_json(msg=str(e)) def _delete_elb(self): @@ -431,7 +433,7 @@ class ElbManager(object): # Since ELB allows only one listener on each incoming port, a # single match on the incomping port is all we're looking for if existing_listener[0] == listener['load_balancer_port']: - existing_listener_found = existing_listener.get_complex_tuple() + existing_listener_found = self._api_listener_as_tuple(existing_listener) break if existing_listener_found: @@ -451,7 +453,7 @@ class ElbManager(object): # Check for any extraneous listeners we need to remove, if desired if self.purge_listeners: for existing_listener in self.elb.listeners: - existing_listener_tuple = existing_listener.get_complex_tuple() + existing_listener_tuple = self._api_listener_as_tuple(existing_listener) if existing_listener_tuple in listeners_to_remove: # Already queued for removal continue @@ -468,6 +470,13 @@ class ElbManager(object): if listeners_to_add: self._create_elb_listeners(listeners_to_add) + def _api_listener_as_tuple(self, listener): + """Adds ssl_certificate_id to ELB API tuple if present""" + base_tuple = listener.get_complex_tuple() + if listener.ssl_certificate_id and len(base_tuple) < 5: + return base_tuple + (listener.ssl_certificate_id,) + 
return base_tuple + def _listener_as_tuple(self, listener): """Formats listener as a 4- or 5-tuples, in the order specified by the ELB API""" diff --git a/cloud/amazon/ec2_facts.py b/cloud/amazon/ec2_facts.py index 9cae0989a95..cf2a90aabc5 100644 --- a/cloud/amazon/ec2_facts.py +++ b/cloud/amazon/ec2_facts.py @@ -63,6 +63,7 @@ class Ec2Metadata(object): AWS_REGIONS = ('ap-northeast-1', 'ap-southeast-1', 'ap-southeast-2', + 'eu-central-1', 'eu-west-1', 'sa-east-1', 'us-east-1', diff --git a/cloud/amazon/ec2_group.py b/cloud/amazon/ec2_group.py index 59623e96d64..b502bd1db53 100644 --- a/cloud/amazon/ec2_group.py +++ b/cloud/amazon/ec2_group.py @@ -128,7 +128,7 @@ def make_rule_key(prefix, rule, group_id, cidr_ip): def addRulesToLookup(rules, prefix, dict): for rule in rules: for grant in rule.grants: - dict[make_rule_key(prefix, rule, grant.group_id, grant.cidr_ip)] = rule + dict[make_rule_key(prefix, rule, grant.group_id, grant.cidr_ip)] = (rule, grant) def get_target_from_rule(module, ec2, rule, name, group, groups, vpc_id): @@ -304,14 +304,13 @@ def main(): # Finally, remove anything left in the groupRules -- these will be defunct rules if purge_rules: - for rule in groupRules.itervalues() : - for grant in rule.grants: - grantGroup = None - if grant.group_id: - grantGroup = groups[grant.group_id] - if not module.check_mode: - group.revoke(rule.ip_protocol, rule.from_port, rule.to_port, grant.cidr_ip, grantGroup) - changed = True + for (rule, grant) in groupRules.itervalues() : + grantGroup = None + if grant.group_id: + grantGroup = groups[grant.group_id] + if not module.check_mode: + group.revoke(rule.ip_protocol, rule.from_port, rule.to_port, grant.cidr_ip, grantGroup) + changed = True # Manage egress rules groupRules = {} @@ -369,20 +368,19 @@ def main(): # Finally, remove anything left in the groupRules -- these will be defunct rules if purge_rules_egress: - for rule in groupRules.itervalues(): - for grant in rule.grants: - grantGroup = None - if grant.group_id: - grantGroup = groups[grant.group_id].id - if not module.check_mode: - ec2.revoke_security_group_egress( - group_id=group.id, - ip_protocol=rule.ip_protocol, - from_port=rule.from_port, - to_port=rule.to_port, - src_group_id=grantGroup, - cidr_ip=grant.cidr_ip) - changed = True + for (rule, grant) in groupRules.itervalues(): + grantGroup = None + if grant.group_id: + grantGroup = groups[grant.group_id].id + if not module.check_mode: + ec2.revoke_security_group_egress( + group_id=group.id, + ip_protocol=rule.ip_protocol, + from_port=rule.from_port, + to_port=rule.to_port, + src_group_id=grantGroup, + cidr_ip=grant.cidr_ip) + changed = True if group: module.exit_json(changed=changed, group_id=group.id) diff --git a/cloud/amazon/ec2_lc.py b/cloud/amazon/ec2_lc.py index 8fbdcea3e66..30f532c9e4f 100644 --- a/cloud/amazon/ec2_lc.py +++ b/cloud/amazon/ec2_lc.py @@ -93,7 +93,6 @@ options: description: - Used for Auto Scaling groups that launch instances into an Amazon Virtual Private Cloud. Specifies whether to assign a public IP address to each instance launched in a Amazon VPC. 
required: false - default: false aliases: [] version_added: "1.8" ramdisk_id: @@ -255,7 +254,7 @@ def main(): ebs_optimized=dict(default=False, type='bool'), associate_public_ip_address=dict(type='bool'), instance_monitoring=dict(default=False, type='bool'), - assign_public_ip=dict(default=False, type='bool') + assign_public_ip=dict(type='bool') ) ) @@ -265,7 +264,7 @@ def main(): try: connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params) - except boto.exception.NoAuthHandlerFound, e: + except (boto.exception.NoAuthHandlerFound, StandardError), e: module.fail_json(msg=str(e)) state = module.params.get('state') diff --git a/cloud/amazon/ec2_metric_alarm.py b/cloud/amazon/ec2_metric_alarm.py index 519f88f24f8..7a8d573ce74 100644 --- a/cloud/amazon/ec2_metric_alarm.py +++ b/cloud/amazon/ec2_metric_alarm.py @@ -271,7 +271,7 @@ def main(): region, ec2_url, aws_connect_params = get_aws_connection_info(module) try: connection = connect_to_aws(boto.ec2.cloudwatch, region, **aws_connect_params) - except boto.exception.NoAuthHandlerFound, e: + except (boto.exception.NoAuthHandlerFound, StandardError), e: module.fail_json(msg=str(e)) if state == 'present': diff --git a/cloud/amazon/ec2_scaling_policy.py b/cloud/amazon/ec2_scaling_policy.py index ad1fa7ce7f1..8e7d459e3e3 100644 --- a/cloud/amazon/ec2_scaling_policy.py +++ b/cloud/amazon/ec2_scaling_policy.py @@ -163,9 +163,7 @@ def main(): try: connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params) - if not connection: - module.fail_json(msg="failed to connect to AWS for the given region: %s" % str(region)) - except boto.exception.NoAuthHandlerFound, e: + except (boto.exception.NoAuthHandlerFound, StandardError), e: module.fail_json(msg = str(e)) if state == 'present': diff --git a/cloud/amazon/ec2_vol.py b/cloud/amazon/ec2_vol.py index 7919a9ec47e..01a539ae4b0 100644 --- a/cloud/amazon/ec2_vol.py +++ b/cloud/amazon/ec2_vol.py @@ -24,9 +24,9 @@ version_added: "1.1" options: instance: description: - - instance ID if you wish to attach the volume. + - instance ID if you wish to attach the volume. Since 1.9 you can set to None to detach. required: false - default: null + default: null aliases: [] name: description: @@ -55,7 +55,7 @@ options: required: false default: standard aliases: [] - version_added: "1.8" + version_added: "1.9" iops: description: - the provisioned IOPs you want to associate with this volume (integer). @@ -152,12 +152,12 @@ EXAMPLES = ''' image: "{{ image }}" zone: YYYYYY id: my_instance - wait: yes + wait: yes count: 1 register: ec2 - ec2_vol: - instance: "{{ item.id }}" + instance: "{{ item.id }}" name: my_existing_volume_Name_tag device_name: /dev/xvdf with_items: ec2.instances @@ -168,16 +168,20 @@ EXAMPLES = ''' id: vol-XXXXXXXX state: absent +# Detach a volume (since 1.9) +- ec2_vol: + id: vol-XXXXXXXX + instance: None + # List volumes for an instance - ec2_vol: instance: i-XXXXXX state: list - + # Create new volume using SSD storage -- local_action: - module: ec2_vol - instance: XXXXXX - volume_size: 50 +- ec2_vol: + instance: XXXXXX + volume_size: 50 volume_type: gp2 device_name: /dev/xvdf ''' @@ -261,15 +265,18 @@ def create_volume(module, ec2, zone): if iops: volume_type = 'io1' + if instance == 'None' or instance == '': + instance = None + # If no instance supplied, try volume creation based on module parameters. 
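+    # By this point instance may be None: the string 'None' (or an empty
+    # string) was normalised above and signals that the volume should end up
+    # detached; main() performs the actual detach via detach_volume().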
if name or id: - if not instance: - module.fail_json(msg = "If name or id is specified, instance must also be specified") if iops or volume_size: module.fail_json(msg = "Parameters are not compatible: [id or name] and [iops or volume_size]") volume = get_volume(module, ec2) if volume.attachment_state() is not None: + if instance is None: + return volume adata = volume.attach_data if adata.instance_id != instance: module.fail_json(msg = "Volume %s is already attached to another instance: %s" @@ -331,6 +338,13 @@ def attach_volume(module, ec2, volume, instance): except boto.exception.BotoServerError, e: module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) +def detach_volume(module, ec2): + vol = get_volume(module, ec2) + if not vol or vol.attachment_state() is None: + module.exit_json(changed=False) + else: + vol.detach() + module.exit_json(changed=True) def main(): argument_spec = ec2_argument_spec() @@ -362,6 +376,9 @@ def main(): snapshot = module.params.get('snapshot') state = module.params.get('state') + if instance == 'None' or instance == '': + instance = None + ec2 = ec2_connect(module) if state == 'list': @@ -428,6 +445,8 @@ def main(): volume = create_volume(module, ec2, zone) if instance: attach_volume(module, ec2, volume, inst) + else: + detach_volume(module, ec2) module.exit_json(volume_id=volume.id, device=device_name, volume_type=volume.type) # import module snippets diff --git a/cloud/amazon/elasticache.py b/cloud/amazon/elasticache.py index c1846f525a8..4e76d593cc9 100644 --- a/cloud/amazon/elasticache.py +++ b/cloud/amazon/elasticache.py @@ -357,7 +357,9 @@ class ElastiCacheManager(object): 'modifying': 'available', 'deleting': 'gone' } - + if self.status == awaited_status: + # No need to wait, we're already done + return if status_map[self.status] != awaited_status: msg = "Invalid awaited status. '%s' cannot transition to '%s'" self.module.fail_json(msg=msg % (self.status, awaited_status)) diff --git a/cloud/amazon/rds.py b/cloud/amazon/rds.py index d6fd1622161..879143c03f6 100644 --- a/cloud/amazon/rds.py +++ b/cloud/amazon/rds.py @@ -20,7 +20,7 @@ module: rds version_added: "1.3" short_description: create, delete, or modify an Amazon rds instance description: - - Creates, deletes, or modifies rds instances. When creating an instance it can be either a new instance or a read-only replica of an existing instance. This module has a dependency on python-boto >= 2.5. The 'promote' command requires boto >= 2.18.0. + - Creates, deletes, or modifies rds instances. When creating an instance it can be either a new instance or a read-only replica of an existing instance. This module has a dependency on python-boto >= 2.5. The 'promote' command requires boto >= 2.18.0. Certain features such as tags rely on boto.rds2 (boto >= 2.26.0) options: command: description: @@ -31,8 +31,8 @@ options: choices: [ 'create', 'replicate', 'delete', 'facts', 'modify' , 'promote', 'snapshot', 'restore' ] instance_name: description: - - Database instance identifier. - required: true + - Database instance identifier. Required except when using command=facts or command=delete on just a snapshot + required: false default: null aliases: [] source_instance: @@ -179,7 +179,7 @@ options: aliases: [] snapshot: description: - - Name of snapshot to take. When command=delete, if no snapshot name is provided then no snapshot is taken. Used only when command=delete or command=snapshot. + - Name of snapshot to take. When command=delete, if no snapshot name is provided then no snapshot is taken. 
If used with command=delete with no instance_name, the snapshot is deleted. Used with command=facts, command=delete or command=snapshot. required: false default: null aliases: [] @@ -220,8 +220,29 @@ options: default: null aliases: [] version_added: 1.5 + character_set_name: + description: + - Associate the DB instance with a specified character set. Used with command=create. + required: false + default: null + aliases: [] + version_added: 1.9 + publicly_accessible: + description: + - explicitly set whether the resource should be publicly accessible or not. Used with command=create, command=replicate. Requires boto >= 2.26.0 + required: false + default: null + aliases: [] + version_added: 1.9 + tags: + description: + - tags dict to apply to a resource. Used with command=create, command=replicate, command=restore. Requires boto >= 2.26.0 + required: false + default: null + aliases: [] + version_added: 1.9 requirements: [ "boto" ] -author: Bruce Pennypacker +author: Bruce Pennypacker, Will Thames ''' # FIXME: the command stuff needs a 'state' like alias to make things consistent -- MPD @@ -230,17 +251,20 @@ EXAMPLES = ''' # Basic mysql provisioning example - rds: command: create - instance_name: new_database + instance_name: new-database db_engine: MySQL size: 10 instance_type: db.m1.small username: mysql_admin password: 1nsecure + tags: + Environment: testing + Application: cms # Create a read-only replica and wait for it to become available -- rds: +- rds: command: replicate - instance_name: new_database_replica + instance_name: new-database-replica source_instance: new_database wait: yes wait_timeout: 600 @@ -248,20 +272,20 @@ EXAMPLES = ''' # Delete an instance, but create a snapshot before doing so - rds: command: delete - instance_name: new_database + instance_name: new-database snapshot: new_database_snapshot # Get facts about an instance - rds: command: facts - instance_name: new_database + instance_name: new-database register: new_database_facts # Rename an instance and wait for the change to take effect - rds: command: modify - instance_name: new_database - new_instance_name: renamed_database + instance_name: new-database + new_instance_name: renamed-database wait: yes ''' @@ -274,376 +298,715 @@ except ImportError: print "failed=True msg='boto required for this module'" sys.exit(1) -def get_current_resource(conn, resource, command): - # There will be exceptions but we want the calling code to handle them - if command == 'snapshot': - return conn.get_all_dbsnapshots(snapshot_id=resource)[0] - else: - return conn.get_all_dbinstances(resource)[0] +try: + import boto.rds2 + has_rds2 = True +except ImportError: + has_rds2 = False -def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( - command = dict(choices=['create', 'replicate', 'delete', 'facts', 'modify', 'promote', 'snapshot', 'restore'], required=True), - instance_name = dict(required=True), - source_instance = dict(required=False), - db_engine = dict(choices=['MySQL', 'oracle-se1', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres'], required=False), - size = dict(required=False), - instance_type = dict(aliases=['type'], required=False), - username = dict(required=False), - password = dict(no_log=True, required=False), - db_name = dict(required=False), - engine_version = dict(required=False), - parameter_group = dict(required=False), - license_model = dict(choices=['license-included', 'bring-your-own-license', 'general-public-license'], required=False), 
- multi_zone = dict(type='bool', default=False), - iops = dict(required=False), - security_groups = dict(required=False), - vpc_security_groups = dict(type='list', required=False), - port = dict(required=False), - upgrade = dict(type='bool', default=False), - option_group = dict(required=False), - maint_window = dict(required=False), - backup_window = dict(required=False), - backup_retention = dict(required=False), - zone = dict(aliases=['aws_zone', 'ec2_zone'], required=False), - subnet = dict(required=False), - wait = dict(type='bool', default=False), - wait_timeout = dict(default=300), - snapshot = dict(required=False), - apply_immediately = dict(type='bool', default=False), - new_instance_name = dict(required=False), - ) - ) +class RDSException(Exception): + def __init__(self, exc): + if hasattr(exc, 'error_message') and exc.error_message: + self.message = exc.error_message + self.code = exc.error_code + elif hasattr(exc, 'body') and 'Error' in exc.body: + self.message = exc.body['Error']['Message'] + self.code = exc.body['Error']['Code'] + else: + self.message = str(exc) + self.code = 'Unknown Error' - module = AnsibleModule( - argument_spec=argument_spec, - ) - command = module.params.get('command') - instance_name = module.params.get('instance_name') - source_instance = module.params.get('source_instance') - db_engine = module.params.get('db_engine') - size = module.params.get('size') - instance_type = module.params.get('instance_type') - username = module.params.get('username') - password = module.params.get('password') - db_name = module.params.get('db_name') - engine_version = module.params.get('engine_version') - parameter_group = module.params.get('parameter_group') - license_model = module.params.get('license_model') - multi_zone = module.params.get('multi_zone') - iops = module.params.get('iops') - security_groups = module.params.get('security_groups') - vpc_security_groups = module.params.get('vpc_security_groups') - port = module.params.get('port') - upgrade = module.params.get('upgrade') - option_group = module.params.get('option_group') - maint_window = module.params.get('maint_window') - subnet = module.params.get('subnet') - backup_window = module.params.get('backup_window') - backup_retention = module.params.get('backup_retention') - region = module.params.get('region') - zone = module.params.get('zone') - aws_secret_key = module.params.get('aws_secret_key') - aws_access_key = module.params.get('aws_access_key') - wait = module.params.get('wait') - wait_timeout = int(module.params.get('wait_timeout')) - snapshot = module.params.get('snapshot') - apply_immediately = module.params.get('apply_immediately') - new_instance_name = module.params.get('new_instance_name') +class RDSConnection: + def __init__(self, module, region, **aws_connect_params): + try: + self.connection = connect_to_aws(boto.rds, region, **aws_connect_params) + except boto.exception.BotoServerError, e: + module.fail_json(msg=e.error_message) - region, ec2_url, aws_connect_params = get_aws_connection_info(module) - if not region: - module.fail_json(msg = str("region not specified and unable to determine region from EC2_REGION.")) + def get_db_instance(self, instancename): + try: + return RDSDBInstance(self.connection.get_all_dbinstances(instancename)[0]) + except boto.exception.BotoServerError, e: + return None - # connect to the rds endpoint - try: - conn = connect_to_aws(boto.rds, region, **aws_connect_params) - except boto.exception.BotoServerError, e: - module.fail_json(msg = e.error_message) + def 
get_db_snapshot(self, snapshotid):
+        try:
+            return RDSSnapshot(self.connection.get_all_dbsnapshots(snapshot_id=snapshotid)[0])
+        except boto.exception.BotoServerError, e:
+            return None
+
+    def create_db_instance(self, instance_name, size, instance_class, db_engine,
+                           username, password, **params):
+        params['engine'] = db_engine
+        try:
+            result = self.connection.create_dbinstance(instance_name, size, instance_class,
+                                                       username, password, **params)
+            return RDSDBInstance(result)
+        except boto.exception.BotoServerError, e:
+            raise RDSException(e)
+
+    def create_db_instance_read_replica(self, instance_name, source_instance, **params):
+        try:
+            result = self.connection.create_dbinstance_read_replica(instance_name, source_instance, **params)
+            return RDSDBInstance(result)
+        except boto.exception.BotoServerError, e:
+            raise RDSException(e)
+
+    def delete_db_instance(self, instance_name, **params):
+        try:
+            result = self.connection.delete_dbinstance(instance_name, **params)
+            return RDSDBInstance(result)
+        except boto.exception.BotoServerError, e:
+            raise RDSException(e)
+
+    def delete_db_snapshot(self, snapshot):
+        try:
+            result = self.connection.delete_dbsnapshot(snapshot)
+            return RDSSnapshot(result)
+        except boto.exception.BotoServerError, e:
+            raise RDSException(e)
+
+    def modify_db_instance(self, instance_name, **params):
+        try:
+            result = self.connection.modify_dbinstance(instance_name, **params)
+            return RDSDBInstance(result)
+        except boto.exception.BotoServerError, e:
+            raise RDSException(e)
+
+    def restore_db_instance_from_db_snapshot(self, instance_name, snapshot, instance_type, **params):
+        try:
+            result = self.connection.restore_dbinstance_from_dbsnapshot(snapshot, instance_name, instance_type, **params)
+            return RDSDBInstance(result)
+        except 
boto.exception.BotoServerError, e: + raise RDSException(e) - elif command == 'modify': - required_vars = [ 'instance_name' ] - if password: - params["master_password"] = password - invalid_vars = [ 'db_engine', 'username', 'db_name', 'engine_version', 'license_model', 'option_group', 'port', 'upgrade', 'subnet', 'zone', 'source_instance'] + def create_db_snapshot(self, snapshot, instance_name, **params): + try: + result = self.connection.create_dbsnapshot(snapshot, instance_name) + return RDSSnapshot(result) + except boto.exception.BotoServerError, e: + raise RDSException(e) - elif command == 'promote': - required_vars = [ 'instance_name' ] - invalid_vars = [ 'db_engine', 'size', 'username', 'password', 'db_name', 'engine_version', 'parameter_group', 'license_model', 'multi_zone', 'iops', 'vpc_security_groups', 'security_groups', 'option_group', 'maint_window', 'subnet', 'source_instance', 'snapshot', 'apply_immediately', 'new_instance_name' ] - - elif command == 'snapshot': - required_vars = [ 'instance_name', 'snapshot'] - invalid_vars = [ 'db_engine', 'size', 'username', 'password', 'db_name', 'engine_version', 'parameter_group', 'license_model', 'multi_zone', 'iops', 'vpc_security_groups', 'security_groups', 'option_group', 'maint_window', 'subnet', 'source_instance', 'apply_immediately', 'new_instance_name' ] - - elif command == 'restore': - required_vars = [ 'instance_name', 'snapshot', 'instance_type' ] - invalid_vars = [ 'db_engine', 'db_name', 'username', 'password', 'engine_version', 'option_group', 'source_instance', 'apply_immediately', 'new_instance_name', 'vpc_security_groups', 'security_groups' ] - - for v in required_vars: - if not module.params.get(v): - module.fail_json(msg = str("Parameter %s required for %s command" % (v, command))) - - for v in invalid_vars: - if module.params.get(v): - module.fail_json(msg = str("Parameter %s invalid for %s command" % (v, command))) + def promote_read_replica(self, instance_name, **params): + try: + result = self.connection.promote_read_replica(instance_name, **params) + return RDSDBInstance(result) + except boto.exception.BotoServerError, e: + raise RDSException(e) - if db_engine: - params["engine"] = db_engine - if port: - params["port"] = port +class RDS2Connection: + def __init__(self, module, region, **aws_connect_params): + try: + self.connection = connect_to_aws(boto.rds2, region, **aws_connect_params) + except boto.exception.BotoServerError, e: + module.fail_json(msg=e.error_message) - if db_name: - params["db_name"] = db_name + def get_db_instance(self, instancename): + try: + dbinstances = self.connection.describe_db_instances(db_instance_identifier=instancename)['DescribeDBInstancesResponse']['DescribeDBInstancesResult']['DBInstances'] + result = RDS2DBInstance(dbinstances[0]) + return result + except boto.rds2.exceptions.DBInstanceNotFound, e: + return None + except Exception, e: + raise e + + def get_db_snapshot(self, snapshotid): + try: + snapshots = self.connection.describe_db_snapshots(db_snapshot_identifier=snapshotid, snapshot_type='manual')['DescribeDBSnapshotsResponse']['DescribeDBSnapshotsResult']['DBSnapshots'] + result = RDS2Snapshot(snapshots[0]) + return result + except boto.rds2.exceptions.DBSnapshotNotFound, e: + return None + + def create_db_instance(self, instance_name, size, instance_class, db_engine, + username, password, **params): + try: + result = self.connection.create_db_instance(instance_name, size, instance_class, + db_engine, username, password, 
**params)['CreateDBInstanceResponse']['CreateDBInstanceResult']['DBInstance'] + return RDS2DBInstance(result) + except boto.exception.BotoServerError, e: + raise RDSException(e) - if parameter_group: - params["param_group"] = parameter_group + def create_db_instance_read_replica(self, instance_name, source_instance, **params): + try: + result = self.connection.create_db_instance_read_replica(instance_name, source_instance, **params)['CreateDBInstanceReadReplicaResponse']['CreateDBInstanceReadReplicaResult']['DBInstance'] + return RDS2DBInstance(result) + except boto.exception.BotoServerError, e: + raise RDSException(e) - if zone: - params["availability_zone"] = zone - - if maint_window: - params["preferred_maintenance_window"] = maint_window + def delete_db_instance(self, instance_name, **params): + try: + result = self.connection.delete_db_instance(instance_name, **params)['DeleteDBInstanceResponse']['DeleteDBInstanceResult']['DBInstance'] + return RDS2DBInstance(result) + except boto.exception.BotoServerError, e: + raise RDSException(e) - if backup_window: - params["preferred_backup_window"] = backup_window + def delete_db_snapshot(self, snapshot): + try: + result = self.connection.delete_db_snapshot(snapshot)['DeleteDBSnapshotResponse']['DeleteDBSnapshotResult']['DBSnapshot'] + return RDS2Snapshot(result) + except boto.exception.BotoServerError, e: + raise RDSException(e) - if backup_retention: - params["backup_retention_period"] = backup_retention + def modify_db_instance(self, instance_name, **params): + try: + result = self.connection.modify_db_instance(instance_name, **params)['ModifyDBInstanceResponse']['ModifyDBInstanceResult']['DBInstance'] + return RDS2DBInstance(result) + except boto.exception.BotoServerError, e: + raise RDSException(e) - if multi_zone: - params["multi_az"] = multi_zone + def restore_db_instance_from_db_snapshot(self, instance_name, snapshot, instance_type, **params): + try: + result = self.connection.restore_db_instance_from_db_snapshot(instance_name, snapshot, **params)['RestoreDBInstanceFromDBSnapshotResponse']['RestoreDBInstanceFromDBSnapshotResult']['DBInstance'] + return RDS2DBInstance(result) + except boto.exception.BotoServerError, e: + raise RDSException(e) - if engine_version: - params["engine_version"] = engine_version + def create_db_snapshot(self, snapshot, instance_name, **params): + try: + result = self.connection.create_db_snapshot(snapshot, instance_name, **params)['CreateDBSnapshotResponse']['CreateDBSnapshotResult']['DBSnapshot'] + return RDS2Snapshot(result) + except boto.exception.BotoServerError, e: + raise RDSException(e) - if upgrade: - params["auto_minor_version_upgrade"] = upgrade + def promote_read_replica(self, instance_name, **params): + try: + result = self.connection.promote_read_replica(instance_name, **params)['PromoteReadReplicaResponse']['PromoteReadReplicaResult']['DBInstance'] + return RDS2DBInstance(result) + except boto.exception.BotoServerError, e: + raise RDSException(e) + + +class RDSDBInstance: + def __init__(self, dbinstance): + self.instance = dbinstance + self.name = dbinstance.id + self.status = dbinstance.status + + def get_data(self): + d = { + 'id' : self.name, + 'create_time' : self.instance.create_time, + 'status' : self.status, + 'availability_zone' : self.instance.availability_zone, + 'backup_retention' : self.instance.backup_retention_period, + 'backup_window' : self.instance.preferred_backup_window, + 'maintenance_window' : self.instance.preferred_maintenance_window, + 'multi_zone' : 
self.instance.multi_az, + 'instance_type' : self.instance.instance_class, + 'username' : self.instance.master_username, + 'iops' : self.instance.iops + } + + # Endpoint exists only if the instance is available + if self.status == 'available': + d["endpoint"] = self.instance.endpoint[0] + d["port"] = self.instance.endpoint[1] + if self.instance.vpc_security_groups is not None: + d["vpc_security_groups"] = ','.join(x.vpc_group for x in self.instance.vpc_security_groups) + else: + d["vpc_security_groups"] = None + else: + d["endpoint"] = None + d["port"] = None + d["vpc_security_groups"] = None - if subnet: - params["db_subnet_group_name"] = subnet + # ReadReplicaSourceDBInstanceIdentifier may or may not exist + try: + d["replication_source"] = self.instance.ReadReplicaSourceDBInstanceIdentifier + except Exception, e: + d["replication_source"] = None + return d - if license_model: - params["license_model"] = license_model - if option_group: - params["option_group_name"] = option_group - if iops: - params["iops"] = iops - if security_groups: - params["security_groups"] = security_groups.split(',') +class RDS2DBInstance: + def __init__(self, dbinstance): + self.instance = dbinstance + if 'DBInstanceIdentifier' not in dbinstance: + self.name = None + else: + self.name = self.instance.get('DBInstanceIdentifier') + self.status = self.instance.get('DBInstanceStatus') + + def get_data(self): + d = { + 'id': self.name, + 'create_time': self.instance['InstanceCreateTime'], + 'status': self.status, + 'availability_zone': self.instance['AvailabilityZone'], + 'backup_retention': self.instance['BackupRetentionPeriod'], + 'maintenance_window': self.instance['PreferredMaintenanceWindow'], + 'multi_zone': self.instance['MultiAZ'], + 'instance_type': self.instance['DBInstanceClass'], + 'username': self.instance['MasterUsername'], + 'iops': self.instance['Iops'], + 'replication_source': self.instance['ReadReplicaSourceDBInstanceIdentifier'] + } + if self.instance["VpcSecurityGroups"] is not None: + d['vpc_security_groups'] = ','.join(x['VpcSecurityGroupId'] for x in self.instance['VpcSecurityGroups']) + if self.status == 'available': + d['endpoint'] = self.instance["Endpoint"]["Address"] + d['port'] = self.instance["Endpoint"]["Port"] + else: + d['endpoint'] = None + d['port'] = None + + return d + + +class RDSSnapshot: + def __init__(self, snapshot): + self.snapshot = snapshot + self.name = snapshot.id + self.status = snapshot.status + + def get_data(self): + d = { + 'id' : self.name, + 'create_time' : self.snapshot.snapshot_create_time, + 'status' : self.status, + 'availability_zone' : self.snapshot.availability_zone, + 'instance_id' : self.snapshot.instance_id, + 'instance_created' : self.snapshot.instance_create_time, + } + # needs boto >= 2.21.0 + if hasattr(self.snapshot, 'snapshot_type'): + d["snapshot_type"] = self.snapshot.snapshot_type + if hasattr(self.snapshot, 'iops'): + d["iops"] = self.snapshot.iops + return d + + +class RDS2Snapshot: + def __init__(self, snapshot): + if 'DeleteDBSnapshotResponse' in snapshot: + self.snapshot = snapshot['DeleteDBSnapshotResponse']['DeleteDBSnapshotResult']['DBSnapshot'] + else: + self.snapshot = snapshot + self.name = self.snapshot.get('DBSnapshotIdentifier') + self.status = self.snapshot.get('Status') + + def get_data(self): + d = { + 'id' : self.name, + 'create_time' : self.snapshot['SnapshotCreateTime'], + 'status' : self.status, + 'availability_zone' : self.snapshot['AvailabilityZone'], + 'instance_id' : self.snapshot['DBInstanceIdentifier'], + 
'instance_created' : self.snapshot['InstanceCreateTime'], + 'snapshot_type' : self.snapshot['SnapshotType'], + 'iops' : self.snapshot['Iops'], + } + return d + + +def await_resource(conn, resource, status, module): + wait_timeout = module.params.get('wait_timeout') + time.time() + while wait_timeout > time.time() and resource.status != status: + time.sleep(5) + if wait_timeout <= time.time(): + module.fail_json(msg="Timeout waiting for resource %s" % resource.id) + if module.params.get('command') == 'snapshot': + # Temporary until all the rds2 commands have their responses parsed + if resource.name is None: + module.fail_json(msg="Problem with snapshot %s" % resource.snapshot) + resource = conn.get_db_snapshot(resource.name) + else: + # Temporary until all the rds2 commands have their responses parsed + if resource.name is None: + module.fail_json(msg="Problem with instance %s" % resource.instance) + resource = conn.get_db_instance(resource.name) + return resource + + +def create_db_instance(module, conn): + subnet = module.params.get('subnet') + required_vars = ['instance_name', 'db_engine', 'size', 'instance_type', 'username', 'password'] + valid_vars = ['backup_retention', 'backup_window', + 'character_set_name', 'db_name', 'engine_version', + 'instance_type', 'iops', 'license_model', 'maint_window', + 'multi_zone', 'option_group', 'parameter_group','port', + 'subnet', 'upgrade', 'zone'] + if module.params.get('subnet'): + valid_vars.append('vpc_security_groups') + else: + valid_vars.append('security_groups') + if has_rds2: + valid_vars.extend(['publicly_accessible', 'tags']) + params = validate_parameters(required_vars, valid_vars, module) + instance_name = module.params.get('instance_name') + + result = conn.get_db_instance(instance_name) + if result: + changed = False + else: + try: + result = conn.create_db_instance(instance_name, module.params.get('size'), + module.params.get('instance_type'), module.params.get('db_engine'), + module.params.get('username'), module.params.get('password'), **params) + changed = True + except RDSException, e: + module.fail_json(msg="failed to create instance: %s" % e.message) + + if module.params.get('wait'): + resource = await_resource(conn, result, 'available', module) + else: + resource = conn.get_db_instance(instance_name) - if vpc_security_groups: - groups_list = [] - for x in vpc_security_groups: - groups_list.append(boto.rds.VPCSecurityGroupMembership(vpc_group=x)) - params["vpc_security_groups"] = groups_list + module.exit_json(changed=changed, instance=resource.get_data()) - if new_instance_name: - params["new_instance_id"] = new_instance_name - changed = True +def replicate_db_instance(module, conn): + required_vars = ['instance_name', 'source_instance'] + valid_vars = ['instance_type', 'port', 'upgrade', 'zone'] + if has_rds2: + valid_vars.extend(['iops', 'option_group', 'publicly_accessible', 'tags']) + params = validate_parameters(required_vars, valid_vars, module) + instance_name = module.params.get('instance_name') + source_instance = module.params.get('source_instance') - if command in ['create', 'restore', 'facts']: - try: - result = conn.get_all_dbinstances(instance_name)[0] - changed = False - except boto.exception.BotoServerError, e: - try: - if command == 'create': - result = conn.create_dbinstance(instance_name, size, instance_type, username, password, **params) - if command == 'restore': - result = conn.restore_dbinstance_from_dbsnapshot(snapshot, instance_name, instance_type, **params) - if command == 'facts': - 
module.fail_json(msg = "DB Instance %s does not exist" % instance_name)
-            except boto.exception.BotoServerError, e:
-                module.fail_json(msg = e.error_message)
-
-    if command == 'snapshot':
-        try:
-            result = conn.get_all_dbsnapshots(snapshot)[0]
-            changed = False
-        except boto.exception.BotoServerError, e:
-            try:
-                result = conn.create_dbsnapshot(snapshot, instance_name)
-            except boto.exception.BotoServerError, e:
-                module.fail_json(msg = e.error_message)
-
-    if command == 'delete':
-        try:
-            result = conn.get_all_dbinstances(instance_name)[0]
-            if result.status == 'deleting':
-                module.exit_json(changed=False)
-        except boto.exception.BotoServerError, e:
-            module.exit_json(changed=False)
+    result = conn.get_db_instance(instance_name)
+    if result:
+        changed = False
+    else:
         try:
+            result = conn.create_db_instance_read_replica(instance_name, source_instance, **params)
+            changed = True
+        except RDSException, e:
+            module.fail_json(msg="failed to create replica instance: %s " % e.message)
+
+    if module.params.get('wait'):
+        resource = await_resource(conn, result, 'available', module)
+    else:
+        resource = conn.get_db_instance(instance_name)
+
+    module.exit_json(changed=changed, instance=resource.get_data())
+
+
+def delete_db_instance_or_snapshot(module, conn):
+    required_vars = []
+    valid_vars = ['instance_name', 'snapshot', 'skip_final_snapshot']
+    params = validate_parameters(required_vars, valid_vars, module)
+    instance_name = module.params.get('instance_name')
+    snapshot = module.params.get('snapshot')
+
+    if not instance_name:
+        result = conn.get_db_snapshot(snapshot)
+    else:
+        result = conn.get_db_instance(instance_name)
+    if not result:
+        module.exit_json(changed=False)
+    if result.status == 'deleting':
+        module.exit_json(changed=False)
+    try:
+        if instance_name:
             if snapshot:
                 params["skip_final_snapshot"] = False
                 params["final_snapshot_id"] = snapshot
             else:
                 params["skip_final_snapshot"] = True
-            result = conn.delete_dbinstance(instance_name, **params)
-        except boto.exception.BotoServerError, e:
-            module.fail_json(msg = e.error_message)
+            result = conn.delete_db_instance(instance_name, **params)
+        else:
+            result = conn.delete_db_snapshot(snapshot)
+    except RDSException, e:
+        module.fail_json(msg="failed to delete instance: %s" % e.message)
 
-    if command == 'replicate':
-        try:
-            if instance_type:
-                params["instance_class"] = instance_type
-            result = conn.create_dbinstance_read_replica(instance_name, source_instance, **params)
-        except boto.exception.BotoServerError, e:
-            module.fail_json(msg = e.error_message)
+    # If we're not waiting for a delete to complete then we're all done
+    # so just return
+    if not module.params.get('wait'):
+        module.exit_json(changed=True)
+    try:
+        resource = await_resource(conn, result, 'deleted', module)
+        module.exit_json(changed=True)
+    except RDSException, e:
+        if e.code == 'DBInstanceNotFound':
+            module.exit_json(changed=True)
+        else:
+            module.fail_json(msg=e.message)
+    except Exception, e:
+        module.fail_json(msg=str(e))
 
-    if command == 'modify':
-        try:
-            params["apply_immediately"] = apply_immediately
-            result = conn.modify_dbinstance(instance_name, **params)
-        except boto.exception.BotoServerError, e:
-            module.fail_json(msg = e.error_message)
-        if apply_immediately:
-            if new_instance_name:
-                # Wait until the new instance name is valid
-                found = 0
-                while found == 0:
-                    instances = conn.get_all_dbinstances()
-                    for i in instances:
-                        if i.id == new_instance_name:
-                            instance_name = new_instance_name
-                            found = 1
-                    if found == 0:
-                        time.sleep(5)
-
+
+def facts_db_instance_or_snapshot(module, conn):
+    required_vars = []
+    valid_vars = ['instance_name', 'snapshot']
+    params = validate_parameters(required_vars, valid_vars, module)
+    instance_name = module.params.get('instance_name')
+    snapshot = module.params.get('snapshot')
+
+    if instance_name and snapshot:
+        module.fail_json(msg="facts must be called with either instance_name or snapshot, not both")
+    if instance_name:
+        resource = conn.get_db_instance(instance_name)
+        if not resource:
+            module.fail_json(msg="DB Instance %s does not exist" % instance_name)
+    if snapshot:
+        resource = conn.get_db_snapshot(snapshot)
+        if not resource:
+            module.fail_json(msg="DB snapshot %s does not exist" % snapshot)
+
+    module.exit_json(changed=False, instance=resource.get_data())
+
+
+def modify_db_instance(module, conn):
+    required_vars = ['instance_name']
+    valid_vars = ['apply_immediately', 'backup_retention', 'backup_window',
+                  'db_name', 'engine_version', 'instance_type', 'iops', 'license_model',
+                  'maint_window', 'multi_zone', 'new_instance_name',
+                  'option_group', 'parameter_group', 'password', 'size', 'upgrade']
+
+    params = validate_parameters(required_vars, valid_vars, module)
+    instance_name = module.params.get('instance_name')
+    new_instance_name = module.params.get('new_instance_name')
+
+    try:
+        result = conn.modify_db_instance(instance_name, **params)
+    except RDSException, e:
+        module.fail_json(msg=e.message)
+    if params.get('apply_immediately'):
+        if new_instance_name:
+            # Wait until the new instance name is valid
+            new_instance = None
+            while not new_instance:
+                new_instance = conn.get_db_instance(new_instance_name)
                 time.sleep(5)
 
-                # The name of the 
database has now changed, so we have - # to force result to contain the new instance, otherwise - # the call below to get_current_resource will fail since it - # will be looking for the old instance name. - result.id = new_instance_name - else: - # Wait for a few seconds since it takes a while for AWS - # to change the instance from 'available' to 'modifying' + +def facts_db_instance_or_snapshot(module, conn): + required_vars = [] + valid_vars = ['instance_name', 'snapshot'] + params = validate_parameters(required_vars, valid_vars, module) + instance_name = module.params.get('instance_name') + snapshot = module.params.get('snapshot') + + if instance_name and snapshot: + module.fail_json(msg="facts must be called with either instance_name or snapshot, not both") + if instance_name: + resource = conn.get_db_instance(instance_name) + if not resource: + module.fail_json(msg="DB Instance %s does not exist" % instance_name) + if snapshot: + resource = conn.get_db_snapshot(snapshot) + if not resource: + module.fail_json(msg="DB snapshot %s does not exist" % snapshot) + + module.exit_json(changed=False, instance=resource.get_data()) + + +def modify_db_instance(module, conn): + required_vars = ['instance_name'] + valid_vars = ['apply_immediately', 'backup_retention', 'backup_window', + 'db_name', 'engine_version', 'instance_type', 'iops', 'license_model', + 'maint_window', 'multi_zone', 'new_instance_name', + 'option_group', 'parameter_group' 'password', 'size', 'upgrade'] + + params = validate_parameters(required_vars, valid_vars, module) + instance_name = module.params.get('instance_name') + new_instance_name = module.params.get('new_instance_name') + + try: + result = conn.modify_db_instance(instance_name, **params) + except RDSException, e: + module.fail_json(msg=e.message) + if params.get('apply_immediately'): + if new_instance_name: + # Wait until the new instance name is valid + new_instance = None + while not new_instance: + new_instance = conn.get_db_instance(new_instance_name) time.sleep(5) - if command == 'promote': + # Found instance but it briefly flicks to available + # before rebooting so let's wait until we see it rebooting + # before we check whether to 'wait' + result = await_resource(conn, new_instance, 'rebooting', module) + + if module.params.get('wait'): + resource = await_resource(conn, result, 'available', module) + else: + resource = conn.get_db_instance(instance_name) + + # guess that this changed the DB, need a way to check + module.exit_json(changed=True, instance=resource.get_data()) + + +def promote_db_instance(module, conn): + required_vars = ['instance_name'] + valid_vars = ['backup_retention', 'backup_window'] + params = validate_parameters(required_vars, valid_vars, module) + instance_name = module.params.get('instance_name') + + result = conn.get_db_instance(instance_name) + if result.get_data().get('replication_source'): + changed = False + else: try: result = conn.promote_read_replica(instance_name, **params) - except boto.exception.BotoServerError, e: - module.fail_json(msg = e.error_message) + except RDSException, e: + module.fail_json(msg=e.message) - # If we're not waiting for a delete to complete then we're all done - # so just return - if command == 'delete' and not wait: - module.exit_json(changed=True) + if module.params.get('wait'): + resource = await_resource(conn, result, 'available', module) + else: + resource = conn.get_db_instance(instance_name) - try: - resource = get_current_resource(conn, result.id, command) - except 
boto.exception.BotoServerError, e: - module.fail_json(msg = e.error_message) + module.exit_json(changed=changed, instance=resource.get_data()) - # Wait for the resource to be available if requested - if wait: - try: - wait_timeout = time.time() + wait_timeout - time.sleep(5) - while wait_timeout > time.time() and resource.status != 'available': - time.sleep(5) - if wait_timeout <= time.time(): - module.fail_json(msg = "Timeout waiting for resource %s" % resource.id) - resource = get_current_resource(conn, result.id, command) - except boto.exception.BotoServerError, e: - # If we're waiting for an instance to be deleted then - # get_all_dbinstances will eventually throw a - # DBInstanceNotFound error. - if command == 'delete' and e.error_code == 'DBInstanceNotFound': - module.exit_json(changed=True) - else: - module.fail_json(msg = e.error_message) - - # If we got here then pack up all the instance details to send - # back to ansible - if command == 'snapshot': - d = { - 'id' : resource.id, - 'create_time' : resource.snapshot_create_time, - 'status' : resource.status, - 'availability_zone' : resource.availability_zone, - 'instance_id' : resource.instance_id, - 'instance_created' : resource.instance_create_time, - } +def snapshot_db_instance(module, conn): + required_vars = ['instance_name', 'snapshot'] + valid_vars = ['tags'] + params = validate_parameters(required_vars, valid_vars, module) + instance_name = module.params.get('instance_name') + snapshot = module.params.get('snapshot') + changed = False + result = conn.get_db_snapshot(snapshot) + if not result: try: - d["snapshot_type"] = resource.snapshot_type - d["iops"] = resource.iops - except AttributeError, e: - pass # needs boto >= 2.21.0 - - return module.exit_json(changed=changed, snapshot=d) - - d = { - 'id' : resource.id, - 'create_time' : resource.create_time, - 'status' : resource.status, - 'availability_zone' : resource.availability_zone, - 'backup_retention' : resource.backup_retention_period, - 'backup_window' : resource.preferred_backup_window, - 'maintenance_window' : resource.preferred_maintenance_window, - 'multi_zone' : resource.multi_az, - 'instance_type' : resource.instance_class, - 'username' : resource.master_username, - 'iops' : resource.iops - } + result = conn.create_db_snapshot(snapshot, instance_name, **params) + changed = True + except RDSException, e: + module.fail_json(msg=e.message) - # Endpoint exists only if the instance is available - if resource.status == 'available' and command != 'snapshot': - d["endpoint"] = resource.endpoint[0] - d["port"] = resource.endpoint[1] - if resource.vpc_security_groups is not None: - d["vpc_security_groups"] = ','.join(x.vpc_group for x in resource.vpc_security_groups) - else: - d["vpc_security_groups"] = None + if module.params.get('wait'): + resource = await_resource(conn, result, 'available', module) else: - d["endpoint"] = None - d["port"] = None - d["vpc_security_groups"] = None + resource = conn.get_db_snapshot(snapshot) - # ReadReplicaSourceDBInstanceIdentifier may or may not exist - try: - d["replication_source"] = resource.ReadReplicaSourceDBInstanceIdentifier - except Exception, e: - d["replication_source"] = None + module.exit_json(changed=changed, snapshot=resource.get_data()) - module.exit_json(changed=changed, instance=d) +def restore_db_instance(module, conn): + required_vars = ['instance_name', 'snapshot'] + valid_vars = ['db_name', 'iops', 'license_model', 'multi_zone', + 'option_group', 'port', 'publicly_accessible', + 'subnet', 'tags', 'upgrade', 'zone'] + 
if has_rds2: + valid_vars.append('instance_type') + else: + required_vars.append('instance_type') + params = validate_parameters(required_vars, valid_vars, module) + instance_name = module.params.get('instance_name') + instance_type = module.params.get('instance_type') + snapshot = module.params.get('snapshot') + + changed = False + result = conn.get_db_instance(instance_name) + if not result: + try: + result = conn.restore_db_instance_from_db_snapshot(instance_name, snapshot, instance_type, **params) + changed = True + except RDSException, e: + module.fail_json(msg=e.message) + + if module.params.get('wait'): + resource = await_resource(conn, result, 'available', module) + else: + resource = conn.get_db_instance(instance_name) + + module.exit_json(changed=changed, instance=resource.get_data()) + + +def validate_parameters(required_vars, valid_vars, module): + command = module.params.get('command') + for v in required_vars: + if not module.params.get(v): + module.fail_json(msg="Parameter %s required for %s command" % (v, command)) + + # map to convert rds module options to boto rds and rds2 options + optional_params = { + 'port': 'port', + 'db_name': 'db_name', + 'zone': 'availability_zone', + 'maint_window': 'preferred_maintenance_window', + 'backup_window': 'preferred_backup_window', + 'backup_retention': 'backup_retention_period', + 'multi_zone': 'multi_az', + 'engine_version': 'engine_version', + 'upgrade': 'auto_minor_version_upgrade', + 'subnet': 'db_subnet_group_name', + 'license_model': 'license_model', + 'option_group': 'option_group_name', + 'iops': 'iops', + 'new_instance_name': 'new_instance_id', + 'apply_immediately': 'apply_immediately', + } + # map to convert rds module options to boto rds options + optional_params_rds = { + 'db_engine': 'engine', + 'password': 'master_password', + 'parameter_group': 'param_group', + 'instance_type': 'instance_class', + } + # map to convert rds module options to boto rds2 options + optional_params_rds2 = { + 'tags': 'tags', + 'publicly_accessible': 'publicly_accessible', + 'parameter_group': 'db_parameter_group_name', + 'character_set_name': 'character_set_name', + 'instance_type': 'db_instance_class', + 'password': 'master_user_password', + 'new_instance_name': 'new_db_instance_identifier', + } + if has_rds2: + optional_params.update(optional_params_rds2) + sec_group = 'db_security_groups' + else: + optional_params.update(optional_params_rds) + sec_group = 'security_groups' + # Check for options only supported with rds2 + for k in set(optional_params_rds2.keys()) - set(optional_params_rds.keys()): + if module.params.get(k): + module.fail_json(msg="Parameter %s requires boto.rds (boto >= 2.26.0)" % k) + + params = {} + for (k, v) in optional_params.items(): + if module.params.get(k) and k not in required_vars: + if k in valid_vars: + params[v] = module.params[k] + else: + module.fail_json(msg="Parameter %s is not valid for %s command" % (k, command)) + + if module.params.get('security_groups'): + params[sec_group] = module.params.get('security_groups').split(',') + + if module.params.get('vpc_security_groups'): + groups_list = [] + for x in module.params.get('vpc_security_groups'): + groups_list.append(boto.rds.VPCSecurityGroupMembership(vpc_group=x)) + params["vpc_security_groups"] = groups_list + + # Convert tags dict to list of tuples that rds2 expects + if 'tags' in params: + params['tags'] = module.params['tags'].items() + return params + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update(dict( + command = 
dict(choices=['create', 'replicate', 'delete', 'facts', 'modify', 'promote', 'snapshot', 'restore'], required=True), + instance_name = dict(required=False), + source_instance = dict(required=False), + db_engine = dict(choices=['MySQL', 'oracle-se1', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres'], required=False), + size = dict(required=False), + instance_type = dict(aliases=['type'], required=False), + username = dict(required=False), + password = dict(no_log=True, required=False), + db_name = dict(required=False), + engine_version = dict(required=False), + parameter_group = dict(required=False), + license_model = dict(choices=['license-included', 'bring-your-own-license', 'general-public-license'], required=False), + multi_zone = dict(type='bool', default=False), + iops = dict(required=False), + security_groups = dict(required=False), + vpc_security_groups = dict(type='list', required=False), + port = dict(required=False), + upgrade = dict(type='bool', default=False), + option_group = dict(required=False), + maint_window = dict(required=False), + backup_window = dict(required=False), + backup_retention = dict(required=False), + zone = dict(aliases=['aws_zone', 'ec2_zone'], required=False), + subnet = dict(required=False), + wait = dict(type='bool', default=False), + wait_timeout = dict(type='int', default=300), + snapshot = dict(required=False), + apply_immediately = dict(type='bool', default=False), + new_instance_name = dict(required=False), + tags = dict(type='dict', required=False), + publicly_accessible = dict(required=False), + character_set_name = dict(required=False), + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + ) + invocations = { + 'create': create_db_instance, + 'replicate': replicate_db_instance, + 'delete': delete_db_instance_or_snapshot, + 'facts': facts_db_instance_or_snapshot, + 'modify': modify_db_instance, + 'promote': promote_db_instance, + 'snapshot': snapshot_db_instance, + 'restore': restore_db_instance, + } + + region, ec2_url, aws_connect_params = get_aws_connection_info(module) + if not region: + module.fail_json(msg="region not specified and unable to determine region from EC2_REGION.") + + # connect to the rds endpoint + if has_rds2: + conn = RDS2Connection(module, region, **aws_connect_params) + else: + conn = RDSConnection(module, region, **aws_connect_params) + + invocations[module.params.get('command')](module, conn) + # import module snippets from ansible.module_utils.basic import * from ansible.module_utils.ec2 import * diff --git a/cloud/amazon/rds_subnet_group.py b/cloud/amazon/rds_subnet_group.py index bba6cd86872..9731154f77c 100644 --- a/cloud/amazon/rds_subnet_group.py +++ b/cloud/amazon/rds_subnet_group.py @@ -79,8 +79,8 @@ EXAMPLES = ''' - subnet-aaaaaaaa - subnet-bbbbbbbb -# Remove a parameter group -- rds_param_group: +# Remove a subnet group +- rds_subnet_group: state: absent name: norwegian-blue ''' diff --git a/cloud/amazon/route53.py b/cloud/amazon/route53.py index 6fb44fcbf0f..50de5cc3b09 100644 --- a/cloud/amazon/route53.py +++ b/cloud/amazon/route53.py @@ -54,9 +54,23 @@ options: default: null aliases: [] choices: [ 'A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'NS' ] + alias: + description: + - Indicates if this is an alias record. + required: false + version_added: 1.9 + default: False + aliases: [] + alias_hosted_zone_id: + description: + - The hosted zone identifier. 
+ required: false + version_added: 1.9 + default: null + aliases: [] value: description: - - The new value when creating a DNS record. Multiple comma-spaced values are allowed. When deleting a record all values for the record must be specified or Route53 will not delete it. + - The new value when creating a DNS record. Multiple comma-spaced values are allowed for non-alias records. When deleting a record all values for the record must be specified or Route53 will not delete it. required: false default: null aliases: [] @@ -84,6 +98,12 @@ options: required: false default: 500 aliases: [] + private_zone: + description: + - If set to true, the private zone matching the requested name within the domain will be used if there are both public and private zones. The default is to use the public zone. + required: false + default: false + version_added: "1.9" requirements: [ "boto" ] author: Bruce Pennypacker ''' @@ -113,6 +133,7 @@ EXAMPLES = ''' command: delete zone: foo.com record: "{{ rec.set.record }}" + ttl: "{{ rec.set.ttl }}" type: "{{ rec.set.type }}" value: "{{ rec.set.value }}" @@ -136,6 +157,16 @@ EXAMPLES = ''' ttl: "7200" value: '"bar"' +# Add an alias record that points to an Amazon ELB: +- route53: + command=create + zone=foo.com + record=elb.foo.com + type=A + value="{{ elb_dns_name }}" + alias=yes + alias_hosted_zone_id="{{ elb_zone_id }}" + ''' @@ -167,25 +198,31 @@ def commit(changes, retry_interval): def main(): argument_spec = ec2_argument_spec() argument_spec.update(dict( - command = dict(choices=['get', 'create', 'delete'], required=True), - zone = dict(required=True), - record = dict(required=True), - ttl = dict(required=False, default=3600), - type = dict(choices=['A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'NS'], required=True), - value = dict(required=False), - overwrite = dict(required=False, type='bool'), - retry_interval = dict(required=False, default=500) + command = dict(choices=['get', 'create', 'delete'], required=True), + zone = dict(required=True), + record = dict(required=True), + ttl = dict(required=False, default=3600), + type = dict(choices=['A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'NS'], required=True), + alias = dict(required=False, type='bool'), + alias_hosted_zone_id = dict(required=False), + value = dict(required=False), + overwrite = dict(required=False, type='bool'), + retry_interval = dict(required=False, default=500), + private_zone = dict(required=False, type='bool', default=False), ) ) module = AnsibleModule(argument_spec=argument_spec) - command_in = module.params.get('command') - zone_in = module.params.get('zone') - ttl_in = module.params.get('ttl') - record_in = module.params.get('record') - type_in = module.params.get('type') - value_in = module.params.get('value') - retry_interval_in = module.params.get('retry_interval') + command_in = module.params.get('command') + zone_in = module.params.get('zone').lower() + ttl_in = module.params.get('ttl') + record_in = module.params.get('record').lower() + type_in = module.params.get('type') + value_in = module.params.get('value') + alias_in = module.params.get('alias') + alias_hosted_zone_id_in = module.params.get('alias_hosted_zone_id') + retry_interval_in = module.params.get('retry_interval') + private_zone_in = module.params.get('private_zone') ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module) @@ -206,6 +243,11 @@ def main(): if command_in == 'create' or command_in == 'delete': if not value_in: module.fail_json(msg = "parameter 'value' required for 
create/delete") + elif alias_in: + if len(value_list) != 1: + module.fail_json(msg = "parameter 'value' must contain a single dns name for alias create/delete") + elif not alias_hosted_zone_id_in: + module.fail_json(msg = "parameter 'alias_hosted_zone_id' required for alias create/delete") # connect to the route53 endpoint try: @@ -217,8 +259,11 @@ def main(): zones = {} results = conn.get_all_hosted_zones() for r53zone in results['ListHostedZonesResponse']['HostedZones']: - zone_id = r53zone['Id'].replace('/hostedzone/', '') - zones[r53zone['Name']] = zone_id + # only save this zone id if the private status of the zone matches + # the private_zone_in boolean specified in the params + if module.boolean(r53zone['Config'].get('PrivateZone', False)) == private_zone_in: + zone_id = r53zone['Id'].replace('/hostedzone/', '') + zones[r53zone['Name']] = zone_id # Verify that the requested zone is already defined in Route53 if not zone_in in zones: @@ -235,7 +280,7 @@ def main(): decoded_name = rset.name.replace(r'\052', '*') decoded_name = decoded_name.replace(r'\100', '@') - if rset.type == type_in and decoded_name == record_in: + if rset.type == type_in and decoded_name.lower() == record_in.lower(): found_record = True record['zone'] = zone_in record['type'] = rset.type @@ -243,6 +288,15 @@ def main(): record['ttl'] = rset.ttl record['value'] = ','.join(sorted(rset.resource_records)) record['values'] = sorted(rset.resource_records) + if rset.alias_dns_name: + record['alias'] = True + record['value'] = rset.alias_dns_name + record['values'] = [rset.alias_dns_name] + record['alias_hosted_zone_id'] = rset.alias_hosted_zone_id + else: + record['alias'] = False + record['value'] = ','.join(sorted(rset.resource_records)) + record['values'] = sorted(rset.resource_records) if value_list == sorted(rset.resource_records) and int(record['ttl']) == ttl_in and command_in == 'create': module.exit_json(changed=False) @@ -260,12 +314,18 @@ def main(): else: change = changes.add_change("DELETE", record_in, type_in, record['ttl']) for v in record['values']: - change.add_value(v) + if record['alias']: + change.set_alias(record['alias_hosted_zone_id'], v) + else: + change.add_value(v) if command_in == 'create' or command_in == 'delete': change = changes.add_change(command_in.upper(), record_in, type_in, ttl_in) for v in value_list: - change.add_value(v) + if module.params['alias']: + change.set_alias(alias_hosted_zone_id_in, v) + else: + change.add_value(v) try: result = commit(changes, retry_interval_in) diff --git a/cloud/amazon/s3.py b/cloud/amazon/s3.py index 7b914dd9117..8fe908e1514 100644 --- a/cloud/amazon/s3.py +++ b/cloud/amazon/s3.py @@ -125,6 +125,8 @@ import os import urlparse import hashlib +from boto.s3.connection import OrdinaryCallingFormat + try: import boto from boto.s3.connection import Location @@ -321,7 +323,6 @@ def main(): if is_fakes3(s3_url): try: fakes3 = urlparse.urlparse(s3_url) - from boto.s3.connection import OrdinaryCallingFormat s3 = boto.connect_s3( aws_access_key, aws_secret_key, @@ -339,20 +340,20 @@ def main(): module.fail_json(msg = str(e)) else: try: - s3 = boto.connect_s3(aws_access_key, aws_secret_key) + s3 = boto.s3.connect_to_region(location, aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_key, is_secure=True, calling_format=OrdinaryCallingFormat()) except boto.exception.NoAuthHandlerFound, e: module.fail_json(msg = str(e)) - + # If our mode is a GET operation (download), go through the procedure as appropriate ... 
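The connection change just above swaps the global `boto.connect_s3` for a region-pinned `boto.s3.connect_to_region` with `OrdinaryCallingFormat`, which keeps path-style requests working for bucket names that are not valid DNS labels. In isolation the pattern looks like this (the region name and credentials are placeholders):

```python
import boto.s3
from boto.s3.connection import OrdinaryCallingFormat

# Connect to a specific region; the "ordinary" (path-style) calling format
# keeps requests working for bucket names with dots or uppercase characters.
s3 = boto.s3.connect_to_region(
    'us-west-2',                        # placeholder region
    aws_access_key_id='AKIA...',        # placeholder credentials
    aws_secret_access_key='secret',
    is_secure=True,
    calling_format=OrdinaryCallingFormat(),
)
bucket = s3.lookup('my-bucket')         # returns None if the bucket is missing
```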
if mode == 'get': - + # First, we check to see if the bucket exists, we get "bucket" returned. bucketrtn = bucket_check(module, s3, bucket) if bucketrtn is False: module.fail_json(msg="Target bucket cannot be found", failed=True) # Next, we check to see if the key in the bucket exists. If it exists, it also returns key_matches md5sum check. - keyrtn = key_check(module, s3, bucket, obj) + keyrtn = key_check(module, s3, bucket, obj) if keyrtn is False: module.fail_json(msg="Target key cannot be found", failed=True) @@ -376,8 +377,8 @@ def main(): if overwrite is True: download_s3file(module, s3, bucket, obj, dest) else: - module.fail_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force download.", failed=True) - + module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force download.") + # Firstly, if key_matches is TRUE and overwrite is not enabled, we EXIT with a helpful message. if sum_matches is True and overwrite is False: module.exit_json(msg="Local and remote object are identical, ignoring. Use overwrite parameter to force.", changed=False) @@ -387,8 +388,8 @@ def main(): download_s3file(module, s3, bucket, obj, dest) # If sum does not match but the destination exists, we - - # if our mode is a PUT operation (upload), go through the procedure as appropriate ... + + # if our mode is a PUT operation (upload), go through the procedure as appropriate ... if mode == 'put': # Use this snippet to debug through conditionals: @@ -399,7 +400,7 @@ def main(): pathrtn = path_check(src) if pathrtn is False: module.fail_json(msg="Local object for PUT does not exist", failed=True) - + # Lets check to see if bucket exists to get ground truth. bucketrtn = bucket_check(module, s3, bucket) if bucketrtn is True: @@ -420,7 +421,7 @@ def main(): if overwrite is True: upload_s3file(module, s3, bucket, obj, src, expiry, metadata) else: - module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force upload.", failed=True) + module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force upload.") # If neither exist (based on bucket existence), we can create both. 
if bucketrtn is False and pathrtn is True:
diff --git a/cloud/azure/azure.py b/cloud/azure/azure.py
index 1679fbc45d1..aba246ec1ad 100644
--- a/cloud/azure/azure.py
+++ b/cloud/azure/azure.py
@@ -173,7 +173,20 @@ AZURE_ROLE_SIZES = ['ExtraSmall',
                     'Basic_A1',
                     'Basic_A2',
                     'Basic_A3',
-                    'Basic_A4']
+                    'Basic_A4',
+                    'Standard_D1',
+                    'Standard_D2',
+                    'Standard_D3',
+                    'Standard_D4',
+                    'Standard_D11',
+                    'Standard_D12',
+                    'Standard_D13',
+                    'Standard_D14',
+                    'Standard_G1',
+                    'Standard_G2',
+                    'Standard_G3',
+                    'Standard_G4',
+                    'Standard_G5']
 
 try:
     import azure as windows_azure
@@ -281,6 +294,7 @@ def create_virtual_machine(module, azure):
         network_config = ConfigurationSetInputEndpoints()
         network_config.configuration_set_type = 'NetworkConfiguration'
         network_config.subnet_names = []
+        network_config.public_ips = None
         for port in endpoints:
             network_config.input_endpoints.append(ConfigurationSetInputEndpoint(name='TCP-%s' % port,
                                                                                 protocol='TCP',
@@ -442,6 +456,8 @@ def main():
            module.fail_json(msg='location parameter is required for new instance')
        if not module.params.get('storage_account'):
            module.fail_json(msg='storage_account parameter is required for new instance')
+        if not module.params.get('password'):
+            module.fail_json(msg='password parameter is required for new instance')
        (changed, public_dns_name, deployment) = create_virtual_machine(module, azure)
        module.exit_json(changed=changed, public_dns_name=public_dns_name, deployment=json.loads(json.dumps(deployment, default=lambda o: o.__dict__)))
diff --git a/cloud/digital_ocean/digital_ocean.py b/cloud/digital_ocean/digital_ocean.py
index efebf5f1bcf..f3a904e8b24 100644
--- a/cloud/digital_ocean/digital_ocean.py
+++ b/cloud/digital_ocean/digital_ocean.py
@@ -236,7 +236,9 @@ class Droplet(JsonfyMixIn):
 
     @classmethod
     def add(cls, name, size_id, image_id, region_id, ssh_key_ids=None, virtio=True, private_networking=False, backups_enabled=False):
-        json = cls.manager.new_droplet(name, size_id, image_id, region_id, ssh_key_ids, virtio, private_networking, backups_enabled)
+        private_networking_lower = str(private_networking).lower()
+        backups_enabled_lower = str(backups_enabled).lower()
+        json = cls.manager.new_droplet(name, size_id, image_id, region_id, ssh_key_ids, virtio, private_networking_lower, backups_enabled_lower)
         droplet = cls(json)
         return droplet
 
diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py
index bbcb73df99b..6e571a7ba5d 100644
--- a/cloud/docker/docker.py
+++ b/cloud/docker/docker.py
@@ -27,334 +27,314 @@ module: docker
 version_added: "1.4"
 short_description: manage docker containers
 description:
-     - Manage the life cycle of docker containers.
+  - Manage the life cycle of docker containers.
 options:
   count:
     description:
-      - Set number of containers to run
-    required: False
+      - Number of matching containers that should be in the desired state.
     default: 1
-    aliases: []
   image:
     description:
-      - Set container image to use
+      - Container image used to match and launch containers.
     required: true
-    default: null
-    aliases: []
+  pull:
+    description:
+      - Control when container images are updated from the C(docker_url) registry.
+        If "missing", images will be pulled only when missing from the host;
+        if "always", the registry will be checked for a newer version of the
+        image each time the task executes.
+    default: missing
+    choices: [ "missing", "always" ]
+    version_added: "1.9"
  command:
    description:
-      - Set command to run in a container on startup
-    required: false
+      - Command used to match and launch containers.
default: null - aliases: [] name: description: - - Set name for container (used to find single container or to provide links) - required: false + - Name used to match and uniquely name launched containers. Explicit names + are used to uniquely identify a single container or to link among + containers. Mutually exclusive with a "count" other than "1". default: null - aliases: [] version_added: "1.5" ports: description: - - Set private to public port mapping specification using docker CLI-style syntax [([:[host_port]])|():][/udp] - required: false + - List containing private to public port mapping specification. Use docker + - 'CLI-style syntax: C(8000), C(9000:8000), or C(0.0.0.0:9000:8000)' + - where 8000 is a container port, 9000 is a host port, and 0.0.0.0 is + - a host interface. default: null - aliases: [] version_added: "1.5" expose: description: - - Set container ports to expose for port mappings or links. (If the port is already exposed using EXPOSE in a Dockerfile, you don't need to expose it again.) - required: false + - List of additional container ports to expose for port mappings or links. + If the port is already exposed using EXPOSE in a Dockerfile, you don't + need to expose it again. default: null - aliases: [] version_added: "1.5" publish_all_ports: description: - - Publish all exposed ports to the host interfaces - required: false + - Publish all exposed ports to the host interfaces. default: false - aliases: [] version_added: "1.5" volumes: description: - - Set volume(s) to mount on the container - required: false + - List of volumes to mount within the container using docker CLI-style + - 'syntax: C(/host:/container[:mode]) where "mode" may be "rw" or "ro".' default: null - aliases: [] volumes_from: description: - - Set shared volume(s) from another container - required: false + - List of names of containers to mount volumes from. default: null - aliases: [] links: description: - - Link container(s) to other container(s) (e.g. links=redis,postgresql:db) - required: false + - List of other containers to link within this container with an optional + - 'alias. Use docker CLI-style syntax: C(redis:myredis).' default: null - aliases: [] version_added: "1.5" memory_limit: description: - - Set RAM allocated to container - required: false - default: null - aliases: [] - default: 256MB + - RAM allocated to the container as a number of bytes or as a human-readable + string like "512MB". Leave as "0" to specify no limit. + default: 0 docker_url: description: - - URL of docker host to issue commands to - required: false - default: unix://var/run/docker.sock - aliases: [] + - URL of the host running the docker daemon. This will default to the env + var DOCKER_HOST if unspecified. + default: ${DOCKER_HOST} or unix://var/run/docker.sock + docker_tls_cert: + description: + - Path to a PEM-encoded client certificate to secure the Docker connection. + default: ${DOCKER_CERT_PATH}/cert.pem + docker_tls_key: + description: + - Path to a PEM-encoded client key to secure the Docker connection. + default: ${DOCKER_CERT_PATH}/key.pem + docker_tls_cacert: + description: + - Path to a PEM-encoded certificate authority to secure the Docker connection. + default: ${DOCKER_CERT_PATH}/ca.pem docker_api_version: description: - - Remote API version to use. This defaults to the current default as specified by docker-py. - required: false + - Remote API version to use. This defaults to the current default as + specified by docker-py. 
default: docker-py default remote API version - aliases: [] version_added: "1.8" username: description: - - Set remote API username - required: false + - Remote API username. default: null - aliases: [] password: description: - - Set remote API password - required: false + - Remote API password. default: null - aliases: [] email: description: - - Set remote API email - required: false + - Remote API email. default: null - aliases: [] hostname: description: - - Set container hostname - required: false + - Container hostname. + default: null + domainname: + description: + - Container domain name. default: null - aliases: [] env: description: - - Set environment variables (e.g. env="PASSWORD=sEcRe7,WORKERS=4") - required: false + - Pass a dict of environment variables to the container. default: null - aliases: [] dns: description: - - Set custom DNS servers for the container + - List of custom DNS servers for the container. required: false default: null - aliases: [] detach: description: - - Enable detached mode on start up, leaves container running in background - required: false + - Enable detached mode to leave the container running in background. default: true - aliases: [] state: description: - - Set the state of the container + - Assert the container's desired state. "present" only asserts that the + matching containers exist. "started" asserts that the matching + containers both exist and are running, but takes no action if any + configuration has changed. "reloaded" asserts that all matching + containers are running and restarts any that have any images or + configuration out of date. "restarted" unconditionally restarts (or + starts) the matching containers. "stopped" and '"killed" stop and kill + all matching containers. "absent" stops and then' removes any matching + containers. required: false - default: present - choices: [ "present", "running", "stopped", "absent", "killed", "restarted" ] - aliases: [] + default: started + choices: + - present + - started + - reloaded + - restarted + - stopped + - killed + - absent privileged: description: - - Set whether the container should run in privileged mode - required: false + - Whether the container should run in privileged mode or not. default: false - aliases: [] lxc_conf: description: - - LXC config parameters, e.g. lxc.aa_profile:unconfined - required: false - default: - aliases: [] - name: - description: - - Set the name of the container (cannot use with count) - required: false + - LXC configuration parameters, such as C(lxc.aa_profile:unconfined). default: null - aliases: [] - version_added: "1.5" stdin_open: description: - - Keep stdin open - required: false + - Keep stdin open after a container is launched. default: false - aliases: [] version_added: "1.6" tty: description: - - Allocate a pseudo-tty - required: false + - Allocate a pseudo-tty within the container. default: false - aliases: [] version_added: "1.6" net: description: - - Set Network mode for the container (bridge, none, container:, host). Requires docker >= 0.11. - required: false + - 'Network mode for the launched container: bridge, none, container:' + - or host. Requires docker >= 0.11. default: false - aliases: [] version_added: "1.8" - registry: + pid: description: - - The remote registry URL to use for pulling images. + - Set the PID namespace mode for the container (currently only supports 'host'). Requires docker-py >= 1.0.0 and docker >= 1.4.1. 
required: false - default: '' + default: None + aliases: [] + version_added: "1.9" + registry: + description: + - Remote registry URL to pull images from. + default: DockerHub aliases: [] version_added: "1.8" restart_policy: description: - - Set the container restart policy - required: false - default: false - aliases: [] + - Container restart policy. + choices: ["no", "on-failure", "always"] + default: null version_added: "1.9" restart_policy_retry: description: - - Set the retry limit for container restart policy - required: false - default: false - aliases: [] + - Maximum number of times to restart a container. Leave as "0" for unlimited + retries. + default: 0 version_added: "1.9" insecure_registry: description: - - Use insecure private registry by HTTP instead of HTTPS (needed for docker-py >= 0.5.0). - required: false + - Use insecure private registry by HTTP instead of HTTPS. Needed for + docker-py >= 0.5.0. default: false - aliases: [] version_added: "1.9" -author: Cove Schneider, Joshua Conner, Pavel Antonov +author: Cove Schneider, Joshua Conner, Pavel Antonov, Ash Wilson requirements: [ "docker-py >= 0.3.0", "docker >= 0.10.0" ] ''' EXAMPLES = ''' -Start one docker container running tomcat in each host of the web group and bind tomcat's listening port to 8080 -on the host: - -- hosts: web - sudo: yes - tasks: - - name: run tomcat servers - docker: image=centos command="service tomcat6 start" ports=8080 - -The tomcat server's port is NAT'ed to a dynamic port on the host, but you can determine which port the server was -mapped to using docker_containers: - -- hosts: web - sudo: yes - tasks: - - name: run tomcat servers - docker: image=centos command="service tomcat6 start" ports=8080 count=5 - - name: Display IP address and port mappings for containers - debug: msg={{inventory_hostname}}:{{item['HostConfig']['PortBindings']['8080/tcp'][0]['HostPort']}} - with_items: docker_containers - -Just as in the previous example, but iterates over the list of docker containers with a sequence: - -- hosts: web - sudo: yes - vars: - start_containers_count: 5 - tasks: - - name: run tomcat servers - docker: image=centos command="service tomcat6 start" ports=8080 count={{start_containers_count}} - - name: Display IP address and port mappings for containers - debug: msg="{{inventory_hostname}}:{{docker_containers[{{item}}]['HostConfig']['PortBindings']['8080/tcp'][0]['HostPort']}}" - with_sequence: start=0 end={{start_containers_count - 1}} - -Stop, remove all of the running tomcat containers and list the exit code from the stopped containers: - -- hosts: web - sudo: yes - tasks: - - name: stop tomcat servers - docker: image=centos command="service tomcat6 start" state=absent - - name: Display return codes from stopped containers - debug: msg="Returned {{inventory_hostname}}:{{item}}" - with_items: docker_containers - -Create a named container: - -- hosts: web - sudo: yes - tasks: - - name: run tomcat server - docker: image=centos name=tomcat command="service tomcat6 start" ports=8080 - -Create multiple named containers: - -- hosts: web - sudo: yes - tasks: - - name: run tomcat servers - docker: image=centos name={{item}} command="service tomcat6 start" ports=8080 - with_items: - - crookshank - - snowbell - - heathcliff - - felix - - sylvester - -Create containers named in a sequence: - -- hosts: web - sudo: yes - tasks: - - name: run tomcat servers - docker: image=centos name={{item}} command="service tomcat6 start" ports=8080 - with_sequence: start=1 end=5 format=tomcat_%d.example.com - -Create 
two linked containers: - -- hosts: web - sudo: yes - tasks: - - name: ensure redis container is running - docker: image=crosbymichael/redis name=redis - - - name: ensure redis_ambassador container is running - docker: image=svendowideit/ambassador ports=6379:6379 links=redis:redis name=redis_ambassador_ansible - -Create containers with options specified as key-value pairs and lists: - -- hosts: web - sudo: yes - tasks: - - docker: - image: namespace/image_name - links: - - postgresql:db - - redis:redis - - -Create containers with options specified as strings and lists as comma-separated strings: - -- hosts: web - sudo: yes - tasks: - docker: image=namespace/image_name links=postgresql:db,redis:redis - -Create a container with no networking: - -- hosts: web - sudo: yes - tasks: - docker: image=namespace/image_name net=none - +# Containers are matched either by name (if provided) or by an exact match of +# the image they were launched with and the command they're running. The module +# can accept either a name to target a container uniquely, or a count to operate +# on multiple containers at once when it makes sense to do so. + +# Ensure that a data container with the name "mydata" exists. If no container +# by this name exists, it will be created, but not started. + +- name: data container + docker: + name: mydata + image: busybox + state: present + volumes: + - /data + +# Ensure that a Redis server is running, using the volume from the data +# container. Expose the default Redis port. + +- name: redis container + docker: + name: myredis + image: redis + command: redis-server --appendonly yes + state: started + expose: + - 6379 + volumes_from: + - mydata + +# Ensure that a container of your application server is running. This will: +# - pull the latest version of your application image from DockerHub. +# - ensure that a container is running with the specified name and exact image. +# If any configuration options have changed, the existing container will be +# stopped and removed, and a new one will be launched in its place. +# - link this container to the existing redis container launched above with +# an alias. +# - bind TCP port 9000 within the container to port 8080 on all interfaces +# on the host. +# - bind UDP port 9001 within the container to port 8081 on the host, only +# listening on localhost. +# - set the environment variable SECRET_KEY to "ssssh". + +- name: application container + docker: + name: myapplication + image: someuser/appimage + state: reloaded + pull: always + links: + - "myredis:aliasedredis" + ports: + - "8080:9000" + - "127.0.0.1:8081:9001/udp" + env: + SECRET_KEY: ssssh + +# Ensure that exactly five containers of another server are running with this +# exact image and command. If fewer than five are running, more will be launched; +# if more are running, the excess will be stopped. + +- name: load-balanced containers + docker: + state: reloaded + count: 5 + image: someuser/anotherappimage + command: sleep 1d + +# Unconditionally restart a service container. This may be useful within a +# handler, for example. + +- name: application service + docker: + name: myservice + image: someuser/serviceimage + state: restarted + +# Stop all containers running the specified image. + +- name: obsolete container + docker: + image: someuser/oldandbusted + state: stopped + +# Stop and remove a container with the specified name. 
+ +- name: obsolete container + docker: + name: ohno + image: someuser/oldandbusted + state: absent ''' HAS_DOCKER_PY = True import sys +import json +import re +import os +import shlex from urlparse import urlparse try: import docker.client @@ -387,9 +367,11 @@ def _human_to_bytes(number): print "failed=True msg='Could not convert %s to integer'" % (number) sys.exit(1) + def _ansible_facts(container_list): return {"docker_containers": container_list} + def _docker_id_quirk(inspect): # XXX: some quirk in docker if 'ID' in inspect: @@ -416,6 +398,13 @@ def get_split_image_tag(image): return resource, tag + +def is_running(container): + '''Return True if an inspected container is in a state we consider "running."''' + + return container['State']['Running'] == True and not container['State'].get('Ghost', False) + + def get_docker_py_versioninfo(): if hasattr(docker, '__version__'): # a '__version__' attribute was added to the module but not until @@ -443,7 +432,8 @@ def get_docker_py_versioninfo(): # than 0.3.0 so it's okay to lie here. version = (0,) - return version + return tuple(version) + def check_dependencies(module): """ @@ -460,14 +450,18 @@ def check_dependencies(module): class DockerManager(object): - counters = {'created':0, 'started':0, 'stopped':0, 'killed':0, 'removed':0, 'restarted':0, 'pull':0} + counters = dict( + created=0, started=0, stopped=0, killed=0, removed=0, restarted=0, pulled=0 + ) + reload_reasons = [] _capabilities = set() + # Map optional parameters to minimum (docker-py version, server APIVersion) # docker-py version is a tuple of ints because we have to compare them # server APIVersion is passed to a docker-py function that takes strings _cap_ver_req = { 'dns': ((0, 3, 0), '1.10'), - 'volume_from': ((0, 3, 0), '1.10'), + 'volumes_from': ((0, 3, 0), '1.10'), 'restart_policy': ((0, 5, 0), '1.14'), # Clientside only 'insecure_registry': ((0, 5, 0), '0.0') @@ -523,10 +517,50 @@ class DockerManager(object): self.env = self.module.params.get('env', None) - # connect to docker server - docker_url = urlparse(module.params.get('docker_url')) + # Connect to the docker server using any configured host and TLS settings. 
+ + env_host = os.getenv('DOCKER_HOST') + env_cert_path = os.getenv('DOCKER_CERT_PATH') + + docker_url = module.params.get('docker_url') + if not docker_url: + if env_host: + docker_url = env_host + else: + docker_url = 'unix://var/run/docker.sock' + + docker_tls_cert = module.params.get('docker_tls_cert') + if not docker_tls_cert and env_cert_path: + docker_tls_cert = os.path.join(env_cert_path, 'cert.pem') + + docker_tls_key = module.params.get('docker_tls_key') + if not docker_tls_key and env_cert_path: + docker_tls_key = os.path.join(env_cert_path, 'key.pem') + + docker_tls_cacert = module.params.get('docker_tls_cacert') + if not docker_tls_cacert and env_cert_path: + docker_tls_cacert = os.path.join(env_cert_path, 'ca.pem') + docker_api_version = module.params.get('docker_api_version') - self.client = docker.Client(base_url=docker_url.geturl(), version=docker_api_version) + if not docker_api_version: + docker_api_version=docker.client.DEFAULT_DOCKER_API_VERSION + + tls_config = None + if docker_tls_cert or docker_tls_key or docker_tls_cacert: + # See https://github.com/docker/docker-py/blob/d39da11/docker/utils/utils.py#L279-L296 + docker_url = docker_url.replace('tcp://', 'https://') + verify = docker_tls_cacert is not None + + tls_config = docker.tls.TLSConfig( + client_cert=(docker_tls_cert, docker_tls_key), + ca_cert=docker_tls_cacert, + verify=verify, + assert_hostname=False + ) + + self.client = docker.Client(base_url=docker_url, + version=docker_api_version, + tls=tls_config) self.docker_py_versioninfo = get_docker_py_versioninfo() @@ -584,7 +618,6 @@ class DockerManager(object): return processed_links - def get_exposed_ports(self, expose_list): """ Parse the ports and protocols (TCP/UDP) to expose in the docker-py `create_container` call from the docker CLI-style syntax. @@ -603,7 +636,6 @@ class DockerManager(object): else: return None - def get_port_bindings(self, ports): """ Parse the `ports` string into a port bindings dict for the `start_container` call. @@ -646,6 +678,37 @@ class DockerManager(object): return binds + def get_summary_message(self): + ''' + Generate a message that briefly describes the actions taken by this + task, in English. + ''' + + parts = [] + for k, v in self.counters.iteritems(): + if v == 0: + continue + + if v == 1: + plural = "" + else: + plural = "s" + parts.append("%s %d container%s" % (k, v, plural)) + + if parts: + return ", ".join(parts) + "." + else: + return "No action taken." + + def get_reload_reason_message(self): + ''' + Generate a message describing why any reloaded containers were reloaded. 
+ ''' + + if self.reload_reasons: + return ", ".join(self.reload_reasons) + else: + return None def get_summary_counters_msg(self): msg = "" @@ -664,6 +727,26 @@ class DockerManager(object): return False + def get_inspect_image(self): + try: + return self.client.inspect_image(self.module.params.get('image')) + except DockerAPIError as e: + if e.response.status_code == 404: + return None + else: + raise e + + def get_image_repo_tags(self): + image, tag = get_split_image_tag(self.module.params.get('image')) + if tag is None: + tag = 'latest' + resource = '%s:%s' % (image, tag) + + for image in self.client.images(name=image): + if resource in image.get('RepoTags', []): + return image['RepoTags'] + return None + def get_inspect_containers(self, containers): inspect = [] for i in containers: @@ -673,9 +756,286 @@ class DockerManager(object): return inspect + def get_differing_containers(self): + """ + Inspect all matching, running containers, and return those that were + started with parameters that differ from the ones that are provided + during this module run. A list containing the differing + containers will be returned, and a short string describing the specific + difference encountered in each container will be appended to + reload_reasons. + + This generates the set of containers that need to be stopped and + started with new parameters with state=reloaded. + """ + + running = self.get_running_containers() + current = self.get_inspect_containers(running) + + image = self.get_inspect_image() + if image is None: + # The image isn't present. Assume that we're about to pull a new + # tag and *everything* will be restarted. + # + # This will give false positives if you untag an image on the host + # and there's nothing more to pull. + return current + + differing = [] + + for container in current: + + # IMAGE + # Compare the image by ID rather than name, so that containers + # will be restarted when new versions of an existing image are + # pulled. 
+ if container['Image'] != image['Id']: + self.reload_reasons.append('image ({0} => {1})'.format(container['Image'], image['Id'])) + differing.append(container) + continue + + # COMMAND + + expected_command = self.module.params.get('command') + if expected_command: + expected_command = shlex.split(expected_command) + actual_command = container["Config"]["Cmd"] + + if actual_command != expected_command: + self.reload_reasons.append('command ({0} => {1})'.format(actual_command, expected_command)) + differing.append(container) + continue + + # EXPOSED PORTS + expected_exposed_ports = set((image['ContainerConfig']['ExposedPorts'] or {}).keys()) + for p in (self.exposed_ports or []): + expected_exposed_ports.add("/".join(p)) + + actually_exposed_ports = set((container["Config"]["ExposedPorts"] or {}).keys()) + + if actually_exposed_ports != expected_exposed_ports: + self.reload_reasons.append('exposed_ports ({0} => {1})'.format(actually_exposed_ports, expected_exposed_ports)) + differing.append(container) + continue + + # VOLUMES + + expected_volume_keys = set((image['ContainerConfig']['Volumes'] or {}).keys()) + if self.volumes: + expected_volume_keys.update(self.volumes.keys()) + + actual_volume_keys = set((container['Config']['Volumes'] or {}).keys()) + + if actual_volume_keys != expected_volume_keys: + self.reload_reasons.append('volumes ({0} => {1})'.format(actual_volume_keys, expected_volume_keys)) + differing.append(container) + continue + + # MEM_LIMIT + + expected_mem = _human_to_bytes(self.module.params.get('memory_limit')) + actual_mem = container['Config']['Memory'] + + if expected_mem and actual_mem != expected_mem: + self.reload_reasons.append('memory ({0} => {1})'.format(actual_mem, expected_mem)) + differing.append(container) + continue + + # ENVIRONMENT + # actual_env is likely to include environment variables injected by + # the Dockerfile. + + expected_env = {} + + for image_env in image['ContainerConfig']['Env'] or []: + name, value = image_env.split('=', 1) + expected_env[name] = value + + if self.env: + for name, value in self.env.iteritems(): + expected_env[name] = str(value) + + actual_env = {} + for container_env in container['Config']['Env'] or []: + name, value = container_env.split('=', 1) + actual_env[name] = value + + if actual_env != expected_env: + # Don't include the environment difference in the output. + self.reload_reasons.append('environment {0} => {1}'.format(actual_env, expected_env)) + differing.append(container) + continue + + # HOSTNAME + + expected_hostname = self.module.params.get('hostname') + actual_hostname = container['Config']['Hostname'] + if expected_hostname and actual_hostname != expected_hostname: + self.reload_reasons.append('hostname ({0} => {1})'.format(actual_hostname, expected_hostname)) + differing.append(container) + continue + + # DOMAINNAME + + expected_domainname = self.module.params.get('domainname') + actual_domainname = container['Config']['Domainname'] + if expected_domainname and actual_domainname != expected_domainname: + self.reload_reasons.append('domainname ({0} => {1})'.format(actual_domainname, expected_domainname)) + differing.append(container) + continue + + # DETACH + + # We don't have to check for undetached containers. If it wasn't + # detached, it would have stopped before the playbook continued! + + # NAME + + # We also don't have to check name, because this is one of the + # criteria that's used to determine which container(s) match in + # the first place. 
+ + # STDIN_OPEN + + expected_stdin_open = self.module.params.get('stdin_open') + actual_stdin_open = container['Config']['AttachStdin'] + if actual_stdin_open != expected_stdin_open: + self.reload_reasons.append('stdin_open ({0} => {1})'.format(actual_stdin_open, expected_stdin_open)) + differing.append(container) + continue + + # TTY + + expected_tty = self.module.params.get('tty') + actual_tty = container['Config']['Tty'] + if actual_tty != expected_tty: + self.reload_reasons.append('tty ({0} => {1})'.format(actual_tty, expected_tty)) + differing.append(container) + continue + + # -- "start" call differences -- + + # LXC_CONF + + if self.lxc_conf: + expected_lxc = set(self.lxc_conf) + actual_lxc = set(container['HostConfig']['LxcConf'] or []) + if actual_lxc != expected_lxc: + self.reload_reasons.append('lxc_conf ({0} => {1})'.format(actual_lxc, expected_lxc)) + differing.append(container) + continue + + # BINDS + + expected_binds = set() + if self.binds: + for host_path, config in self.binds.iteritems(): + if isinstance(config, dict): + container_path = config['bind'] + if config['ro']: + mode = 'ro' + else: + mode = 'rw' + else: + container_path = config + mode = 'rw' + expected_binds.add("{0}:{1}:{2}".format(host_path, container_path, mode)) + + actual_binds = set() + for bind in (container['HostConfig']['Binds'] or []): + if len(bind.split(':')) == 2: + actual_binds.add(bind + ":rw") + else: + actual_binds.add(bind) + + if actual_binds != expected_binds: + self.reload_reasons.append('binds ({0} => {1})'.format(actual_binds, expected_binds)) + differing.append(container) + continue + + # PORT BINDINGS + + expected_bound_ports = {} + if self.port_bindings: + for container_port, config in self.port_bindings.iteritems(): + if isinstance(container_port, int): + container_port = "{0}/tcp".format(container_port) + bind = {} + if len(config) == 1: + bind['HostIp'] = "0.0.0.0" + bind['HostPort'] = "" + else: + bind['HostIp'] = config[0] + bind['HostPort'] = str(config[1]) + + expected_bound_ports[container_port] = [bind] + + actual_bound_ports = container['HostConfig']['PortBindings'] or {} + + if actual_bound_ports != expected_bound_ports: + self.reload_reasons.append('port bindings ({0} => {1})'.format(actual_bound_ports, expected_bound_ports)) + differing.append(container) + continue + + # PUBLISHING ALL PORTS + + # What we really care about is the set of ports that is actually + # published. That should be caught above. 
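The expected-versus-actual comparison above relies on first normalizing the module's shorthand port bindings into the exact structure that `docker inspect` reports. Pulled out on its own, the normalization reads like this (a standalone restatement of the logic above, not a separate helper in the module):

```python
def normalize_bindings(port_bindings):
    # {9000: ('0.0.0.0', 8080)} or {9000: ('',)} -> the inspect shape:
    # {'9000/tcp': [{'HostIp': '0.0.0.0', 'HostPort': '8080'}]}
    expected = {}
    for container_port, config in port_bindings.iteritems():
        if isinstance(container_port, int):
            container_port = "{0}/tcp".format(container_port)
        if len(config) == 1:
            bind = {'HostIp': "0.0.0.0", 'HostPort': ""}
        else:
            bind = {'HostIp': config[0], 'HostPort': str(config[1])}
        expected[container_port] = [bind]
    return expected
```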
+ + # PRIVILEGED + + expected_privileged = self.module.params.get('privileged') + actual_privileged = container['HostConfig']['Privileged'] + if actual_privileged != expected_privileged: + self.reload_reasons.append('privileged ({0} => {1})'.format(actual_privileged, expected_privileged)) + differing.append(container) + continue + + # LINKS + + expected_links = set() + for link, alias in (self.links or {}).iteritems(): + expected_links.add("/{0}:{1}/{2}".format(link, container["Name"], alias)) + + actual_links = set(container['HostConfig']['Links'] or []) + if actual_links != expected_links: + self.reload_reasons.append('links ({0} => {1})'.format(actual_links, expected_links)) + differing.append(container) + continue + + # NETWORK MODE + + expected_netmode = self.module.params.get('net') or '' + actual_netmode = container['HostConfig']['NetworkMode'] + if actual_netmode != expected_netmode: + self.reload_reasons.append('net ({0} => {1})'.format(actual_netmode, expected_netmode)) + differing.append(container) + continue + + # DNS + + expected_dns = set(self.module.params.get('dns') or []) + actual_dns = set(container['HostConfig']['Dns'] or []) + if actual_dns != expected_dns: + self.reload_reasons.append('dns ({0} => {1})'.format(actual_dns, expected_dns)) + differing.append(container) + continue + + # VOLUMES_FROM + + expected_volumes_from = set(self.module.params.get('volumes_from') or []) + actual_volumes_from = set(container['HostConfig']['VolumesFrom'] or []) + if actual_volumes_from != expected_volumes_from: + self.reload_reasons.append('volumes_from ({0} => {1})'.format(actual_volumes_from, expected_volumes_from)) + differing.append(container) + + return differing + def get_deployed_containers(self): - """determine which images/commands are running already""" - image = self.module.params.get('image') + """ + Return any matching containers that are already present. + """ + command = self.module.params.get('command') if command: command = command.strip() @@ -684,37 +1044,76 @@ class DockerManager(object): name = '/' + name deployed = [] - # if we weren't given a tag with the image, we need to only compare on the image name, as that - # docker will give us back the full image name including a tag in the container list if one exists. - image, tag = get_split_image_tag(image) + # "images" will be a collection of equivalent "name:tag" image names + # that map to the same Docker image. 
+        inspected = self.get_inspect_image()
+        if inspected:
+            repo_tags = self.get_image_repo_tags()
+        else:
+            image, tag = get_split_image_tag(self.module.params.get('image'))
+            repo_tags = [':'.join([image, tag])]
 
         for i in self.client.containers(all=True):
-            running_image, running_tag = get_split_image_tag(i['Image'])
+            running_image = i['Image']
             running_command = i['Command'].strip()
+            matches = False
+
+            if name:
+                matches = name in i.get('Names', [])
+            else:
+                image_matches = running_image in repo_tags
 
-            name_matches = False
-            if i["Names"]:
-                name_matches = (name and name in i['Names'])
-            image_matches = (running_image == image)
-            tag_matches = (not tag or running_tag == tag)
-            # if a container has an entrypoint, `command` will actually equal
-            # '{} {}'.format(entrypoint, command)
-            command_matches = (not command or running_command.endswith(command))
+                # if a container has an entrypoint, `command` will actually equal
+                # '{} {}'.format(entrypoint, command)
+                command_matches = (not command or running_command.endswith(command))
 
-            if name_matches or (name is None and image_matches and tag_matches and command_matches):
+                matches = image_matches and command_matches
+
+            if matches:
                 details = self.client.inspect_container(i['Id'])
                 details = _docker_id_quirk(details)
+
                 deployed.append(details)
 
         return deployed
 
     def get_running_containers(self):
-        running = []
-        for i in self.get_deployed_containers():
-            if i['State']['Running'] == True and i['State'].get('Ghost', False) == False:
-                running.append(i)
+        return [c for c in self.get_deployed_containers() if is_running(c)]
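The `pull_image` method that follows consumes the daemon's streamed pull output, which arrives as newline-delimited JSON documents whose final entry carries the overall result. A minimal standalone illustration of reading that stream with docker-py (`client` is an assumed `docker.Client` instance):

```python
import json

def last_pull_status(client, image, tag='latest'):
    # client.pull(stream=True) yields one JSON document per status update;
    # the final one summarizes the outcome, e.g.
    #   {"status": "Status: Image is up to date for busybox:latest"}
    last = None
    for line in client.pull(image, tag=tag, stream=True):
        last = line
    return json.loads(last).get('status', '') if last else ''
```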
+
+    def pull_image(self):
+        extra_params = {}
+        if self.module.params.get('insecure_registry'):
+            if self.ensure_capability('insecure_registry', fail=False):
+                extra_params['insecure_registry'] = self.module.params.get('insecure_registry')
 
-        return running
+        resource = self.module.params.get('image')
+        image, tag = get_split_image_tag(resource)
+        if self.module.params.get('username'):
+            try:
+                self.client.login(
+                    self.module.params.get('username'),
+                    password=self.module.params.get('password'),
+                    email=self.module.params.get('email'),
+                    registry=self.module.params.get('registry')
+                )
+            except Exception as e:
+                self.module.fail_json(msg="failed to login to the remote registry, check your username/password.", error=repr(e))
+        try:
+            last = None
+            for line in self.client.pull(image, tag=tag, stream=True, **extra_params):
+                last = line
+            status = json.loads(last).get('status', '')
+            if status.startswith('Status: Image is up to date for'):
+                # Image is already up to date. Don't increment the counter.
+                pass
+            elif status.startswith('Status: Downloaded newer image for'):
+                # Image was updated. Increment the pull counter.
+                self.increment_counter('pulled')
+            else:
+                # Unrecognized status string.
+                self.module.fail_json(msg="Unrecognized status from pull.", status=status)
+        except Exception as e:
+            self.module.fail_json(msg="Failed to pull the specified image: %s" % resource, error=repr(e))
 
     def create_containers(self, count=1):
         params = {'image':        self.module.params.get('image'),
@@ -724,23 +1123,19 @@ class DockerManager(object):
                   'mem_limit':    _human_to_bytes(self.module.params.get('memory_limit')),
                   'environment':  self.env,
                   'hostname':     self.module.params.get('hostname'),
+                  'domainname':   self.module.params.get('domainname'),
                   'detach':       self.module.params.get('detach'),
                   'name':         self.module.params.get('name'),
                   'stdin_open':   self.module.params.get('stdin_open'),
                   'tty':          self.module.params.get('tty'),
+                  'volumes_from': self.module.params.get('volumes_from'),
                   }
-
-        if params['dns'] is not None:
-            self.ensure_capability('dns')
+        if docker.utils.compare_version('1.10', self.client.version()['ApiVersion']) >= 0:
+            params['volumes_from'] = ""
 
         if params['volumes_from'] is not None:
             self.ensure_capability('volumes_from')
 
-        extra_params = {}
-        if self.module.params.get('insecure_registry'):
-            if self.ensure_capability('insecure_registry', fail=False):
-                extra_params['insecure_registry'] = self.module.params.get('insecure_registry')
-
         def do_create(count, params):
             results = []
             for _ in range(count):
         try:
             containers = do_create(count, params)
         except:
-            resource = self.module.params.get('image')
-            image, tag = get_split_image_tag(resource)
-            if self.module.params.get('username'):
-                try:
-                    self.client.login(
-                        self.module.params.get('username'),
-                        password=self.module.params.get('password'),
-                        email=self.module.params.get('email'),
-                        registry=self.module.params.get('registry')
-                    )
-                except:
-                    self.module.fail_json(msg="failed to login to the remote registry, check your username/password.")
-            try:
-                self.client.pull(image, tag=tag, **extra_params)
-            except:
-                self.module.fail_json(msg="failed to pull the specified image: %s" % resource)
-            self.increment_counter('pull')
+            self.pull_image()
             containers = do_create(count, params)
 
         return containers
@@ -780,12 +1159,13 @@ class DockerManager(object):
                   'binds': self.binds,
                   'port_bindings': self.port_bindings,
                   'publish_all_ports': self.module.params.get('publish_all_ports'),
-                  'privileged':   self.module.params.get('privileged'),
+                  'privileged': self.module.params.get('privileged'),
                   'links': self.links,
                   'network_mode': self.module.params.get('net'),
+                  'pid_mode': self.module.params.get('pid'),
                   }
 
-        optionals = []
+        optionals = {}
         for optional_param in ('dns', 'volumes_from', 'restart_policy', 'restart_policy_retry'):
             optionals[optional_param] = self.module.params.get(optional_param)
 
@@ -830,11 +1210,135 @@ class DockerManager(object):
             self.increment_counter('restarted')
 
 
+class ContainerSet:
+
+    def __init__(self, manager):
+        self.manager = manager
+        self.running = []
+        self.deployed = []
+        self.changed = []
+
+    def refresh(self):
+        '''
+        Update our view of the matching containers from the Docker daemon.
+        '''
+
+        self.deployed = self.manager.get_deployed_containers()
+        self.running = [c for c in self.deployed if is_running(c)]
+
+    def notice_changed(self, containers):
+        '''
+        Record a collection of containers as "changed".
+def present(manager, containers, count, name):
+    '''Ensure that exactly `count` matching containers exist in any state.'''
+
+    containers.refresh()
+    delta = count - len(containers.deployed)
+
+    if delta > 0:
+        containers.notice_changed(manager.create_containers(delta))
+
+    if delta < 0:
+        # If both running and stopped containers exist, remove
+        # stopped containers first.
+        containers.deployed.sort(lambda cx, cy: cmp(is_running(cx), is_running(cy)))
+
+        to_stop = []
+        to_remove = []
+        for c in containers.deployed[0:-delta]:
+            if is_running(c):
+                to_stop.append(c)
+            to_remove.append(c)
+
+        manager.stop_containers(to_stop)
+        manager.remove_containers(to_remove)
+        containers.notice_changed(to_remove)
+
+def started(manager, containers, count, name):
+    '''Ensure that exactly `count` matching containers exist and are running.'''
+
+    containers.refresh()
+    delta = count - len(containers.running)
+
+    if delta > 0:
+        if name and containers.deployed:
+            # A stopped container exists with the requested name.
+            # Clean it up before attempting to start a new one.
+            manager.remove_containers(containers.deployed)
+
+        created = manager.create_containers(delta)
+        manager.start_containers(created)
+        containers.notice_changed(created)
+
+    if delta < 0:
+        excess = containers.running[0:-delta]
+        manager.stop_containers(excess)
+        manager.remove_containers(excess)
+        containers.notice_changed(excess)
+
+def reloaded(manager, containers, count, name):
+    '''
+    Ensure that exactly `count` matching containers exist and are
+    running. If any associated settings have been changed (volumes,
+    ports or so on), restart those containers.
+    '''
+
+    containers.refresh()
+
+    for container in manager.get_differing_containers():
+        manager.stop_containers([container])
+        manager.remove_containers([container])
+
+    started(manager, containers, count, name)
+
+def restarted(manager, containers, count, name):
+    '''
+    Ensure that exactly `count` matching containers exist and are
+    running. Unconditionally restart any that were already running.
+ ''' + + containers.refresh() + + manager.restart_containers(containers.running) + started(manager, containers, count, name) + +def stopped(manager, containers, count, name): + '''Stop any matching containers that are running.''' + + containers.refresh() + + manager.stop_containers(containers.running) + containers.notice_changed(containers.running) + +def killed(manager, containers, count, name): + '''Kill any matching containers that are running.''' + + containers.refresh() + + manager.kill_containers(containers.running) + containers.notice_changed(containers.running) + +def absent(manager, containers, count, name): + '''Stop and remove any matching containers.''' + + containers.refresh() + + manager.stop_containers(containers.running) + manager.remove_containers(containers.deployed) + containers.notice_changed(containers.deployed) + def main(): module = AnsibleModule( argument_spec = dict( count = dict(default=1), image = dict(required=True), + pull = dict(required=False, default='missing', choices=['missing', 'always']), command = dict(required=False, default=None), expose = dict(required=False, default=None, type='list'), ports = dict(required=False, default=None, type='list'), @@ -844,17 +1348,21 @@ def main(): links = dict(default=None, type='list'), memory_limit = dict(default=0), memory_swap = dict(default=0), - docker_url = dict(default='unix://var/run/docker.sock'), - docker_api_version = dict(default=docker.client.DEFAULT_DOCKER_API_VERSION), + docker_url = dict(), + docker_tls_cert = dict(), + docker_tls_key = dict(), + docker_tls_cacert = dict(), + docker_api_version = dict(), username = dict(default=None), password = dict(), email = dict(), registry = dict(), hostname = dict(default=None), + domainname = dict(default=None), env = dict(type='dict'), dns = dict(), detach = dict(default=True, type='bool'), - state = dict(default='running', choices=['absent', 'present', 'running', 'stopped', 'killed', 'restarted']), + state = dict(default='started', choices=['present', 'started', 'reloaded', 'restarted', 'stopped', 'killed', 'absent']), restart_policy = dict(default=None, choices=['always', 'on-failure', 'no']), restart_policy_retry = dict(default=0, type='int'), debug = dict(default=False, type='bool'), @@ -864,6 +1372,7 @@ def main(): lxc_conf = dict(default=None, type='list'), name = dict(default=None), net = dict(default=None), + pid = dict(default=None), insecure_registry = dict(default=False, type='bool'), ) ) @@ -876,101 +1385,53 @@ def main(): count = int(module.params.get('count')) name = module.params.get('name') image = module.params.get('image') + pull = module.params.get('pull') if count < 0: module.fail_json(msg="Count must be greater than zero") + if count > 1 and name: module.fail_json(msg="Count and name must not be used together") - running_containers = manager.get_running_containers() - running_count = len(running_containers) - delta = count - running_count - deployed_containers = manager.get_deployed_containers() - facts = None - failed = False - changed = False - - # start/stop containers - if state in [ "running", "present" ]: + # Explicitly pull new container images, if requested. + # Do this before noticing running and deployed containers so that the image names will differ + # if a newer image has been pulled. 
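Because the explicit pull happens before the matching step, as the comment above explains, combining the new `pull: always` option with one of the new states lets a task pick up a newer image on each run. A hedged sketch of how the options combine (image and container names are illustrative):

```yaml
# Hypothetical task: always repull the image, then make sure one container
# with this name is running; 'reloaded' would additionally replace
# containers whose configuration has drifted.
- docker:
    image: example/app:latest
    name: app
    pull: always
    state: started
```
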
+ if pull == "always": + manager.pull_image() - # make sure a container with `name` exists, if not create and start it - if name: - # first determine if a container with this name exists - existing_container = None - for deployed_container in deployed_containers: - if deployed_container.get('Name') == '/%s' % name: - existing_container = deployed_container - break - - # the named container is running, but with a - # different image or tag, so we stop it first - if existing_container and existing_container.get('Config', dict()).get('Image') != image: - manager.stop_containers([existing_container]) - manager.remove_containers([existing_container]) - running_containers = manager.get_running_containers() - deployed_containers = manager.get_deployed_containers() - existing_container = None - - # if the container isn't running (or if we stopped the - # old version above), create and (maybe) start it up now - if not existing_container: - containers = manager.create_containers(1) - if state == "present": # otherwise it get (re)started later anyways.. - manager.start_containers(containers) - running_containers = manager.get_running_containers() - deployed_containers = manager.get_deployed_containers() - - if state == "running": - # make sure a container with `name` is running - if name and "/" + name not in map(lambda x: x.get('Name'), running_containers): - manager.start_containers(deployed_containers) - - # start more containers if we don't have enough - elif delta > 0: - containers = manager.create_containers(delta) - manager.start_containers(containers) - - # stop containers if we have too many - elif delta < 0: - containers_to_stop = running_containers[0:abs(delta)] - containers = manager.stop_containers(containers_to_stop) - manager.remove_containers(containers_to_stop) - - facts = manager.get_running_containers() - else: - facts = manager.get_deployed_containers() - - # stop and remove containers - elif state == "absent": - facts = manager.stop_containers(deployed_containers) - manager.remove_containers(deployed_containers) - - # stop containers - elif state == "stopped": - facts = manager.stop_containers(running_containers) - - # kill containers - elif state == "killed": - manager.kill_containers(running_containers) - - # restart containers - elif state == "restarted": - manager.restart_containers(running_containers) - facts = manager.get_inspect_containers(running_containers) + containers = ContainerSet(manager) + failed = False - msg = "%s container(s) running image %s with command %s" % \ - (manager.get_summary_counters_msg(), module.params.get('image'), module.params.get('command')) - changed = manager.has_changed() + if state == 'present': + present(manager, containers, count, name) + elif state == 'started': + started(manager, containers, count, name) + elif state == 'reloaded': + reloaded(manager, containers, count, name) + elif state == 'restarted': + restarted(manager, containers, count, name) + elif state == 'stopped': + stopped(manager, containers, count, name) + elif state == 'killed': + killed(manager, containers, count, name) + elif state == 'absent': + absent(manager, containers, count, name) + else: + module.fail_json(msg='Unrecognized state %s. Must be one of: ' + 'present; started; reloaded; restarted; ' + 'stopped; killed; absent.' 
% state) - module.exit_json(failed=failed, changed=changed, msg=msg, ansible_facts=_ansible_facts(facts)) + module.exit_json(changed=manager.has_changed(), + msg=manager.get_summary_message(), + summary=manager.counters, + containers=containers.changed, + reload_reasons=manager.get_reload_reason_message()) - except DockerAPIError, e: - changed = manager.has_changed() - module.exit_json(failed=True, changed=changed, msg="Docker API error: " + e.explanation) + except DockerAPIError as e: + module.fail_json(changed=manager.has_changed(), msg="Docker API Error: %s" % e.explanation) - except RequestException, e: - changed = manager.has_changed() - module.exit_json(failed=True, changed=changed, msg=repr(e)) + except RequestException as e: + module.fail_json(changed=manager.has_changed(), msg=repr(e)) # import module snippets from ansible.module_utils.basic import * diff --git a/cloud/docker/_docker_image.py b/cloud/docker/docker_image.py similarity index 100% rename from cloud/docker/_docker_image.py rename to cloud/docker/docker_image.py diff --git a/cloud/google/gce.py b/cloud/google/gce.py index d429b61de20..68203736789 100644 --- a/cloud/google/gce.py +++ b/cloud/google/gce.py @@ -115,6 +115,27 @@ options: required: true default: "us-central1-a" aliases: [] + ip_forward: + version_added: "1.9" + description: + - set to true if the instance can forward ip packets (useful for gateways) + required: false + default: "false" + aliases: [] + external_ip: + version_added: "1.9" + description: + - type of external ip, ephemeral by default + required: false + default: "ephemeral" + aliases: [] + disk_auto_delete: + version_added: "1.9" + description: + - if set boot disk will be removed after instance destruction + required: false + default: "true" + aliases: [] requirements: [ "libcloud" ] notes: @@ -223,6 +244,12 @@ def get_instance_info(inst): key=lambda disk_info: disk_info['index'])] else: disk_names = [] + + if len(inst.public_ips) == 0: + public_ip = None + else: + public_ip = inst.public_ips[0] + return({ 'image': not inst.image is None and inst.image.split('/')[-1] or None, 'disks': disk_names, @@ -231,11 +258,11 @@ def get_instance_info(inst): 'name': inst.name, 'network': netname, 'private_ip': inst.private_ips[0], - 'public_ip': inst.public_ips[0], + 'public_ip': public_ip, 'status': ('status' in inst.extra) and inst.extra['status'] or None, 'tags': ('tags' in inst.extra) and inst.extra['tags'] or [], 'zone': ('zone' in inst.extra) and inst.extra['zone'].name or None, - }) + }) def create_instances(module, gce, instance_names): """Creates new instances. 
Attributes other than instance_names are picked @@ -259,6 +286,12 @@ def create_instances(module, gce, instance_names): state = module.params.get('state') tags = module.params.get('tags') zone = module.params.get('zone') + ip_forward = module.params.get('ip_forward') + external_ip = module.params.get('external_ip') + disk_auto_delete = module.params.get('disk_auto_delete') + + if external_ip == "none": + external_ip = None new_instances = [] changed = False @@ -319,7 +352,8 @@ def create_instances(module, gce, instance_names): try: inst = gce.create_node(name, lc_machine_type, lc_image, location=lc_zone, ex_network=network, ex_tags=tags, - ex_metadata=metadata, ex_boot_disk=pd) + ex_metadata=metadata, ex_boot_disk=pd, ex_can_ip_forward=ip_forward, + external_ip=external_ip, ex_disk_auto_delete=disk_auto_delete) changed = True except ResourceExistsError: inst = gce.ex_get_node(name, lc_zone) @@ -409,6 +443,10 @@ def main(): service_account_email = dict(), pem_file = dict(), project_id = dict(), + ip_forward = dict(type='bool', default=False), + external_ip = dict(choices=['ephemeral', 'none'], + default='ephemeral'), + disk_auto_delete = dict(type='bool', default=True), ) ) @@ -424,6 +462,7 @@ def main(): state = module.params.get('state') tags = module.params.get('tags') zone = module.params.get('zone') + ip_forward = module.params.get('ip_forward') changed = False inames = [] diff --git a/cloud/google/gce_net.py b/cloud/google/gce_net.py index 102a73f2bd1..bafe6d1d43a 100644 --- a/cloud/google/gce_net.py +++ b/cloud/google/gce_net.py @@ -66,6 +66,13 @@ options: required: false default: null aliases: [] + target_tags: + version_added: "1.9" + description: + - the target instance tags for creating a firewall rule + required: false + default: null + aliases: [] state: description: - desired state of the persistent disk @@ -156,8 +163,9 @@ def main(): ipv4_range = dict(), fwname = dict(), name = dict(), - src_range = dict(), + src_range = dict(type='list'), src_tags = dict(type='list'), + target_tags = dict(type='list'), state = dict(default='present'), service_account_email = dict(), pem_file = dict(), @@ -173,6 +181,7 @@ def main(): name = module.params.get('name') src_range = module.params.get('src_range') src_tags = module.params.get('src_tags') + target_tags = module.params.get('target_tags') state = module.params.get('state') changed = False @@ -218,7 +227,7 @@ def main(): try: gce.ex_create_firewall(fwname, allowed_list, network=name, - source_ranges=src_range, source_tags=src_tags) + source_ranges=src_range, source_tags=src_tags, target_tags=target_tags) changed = True except ResourceExistsError: pass @@ -229,6 +238,7 @@ def main(): json_output['allowed'] = allowed json_output['src_range'] = src_range json_output['src_tags'] = src_tags + json_output['target_tags'] = target_tags if state in ['absent', 'deleted']: if fwname: diff --git a/cloud/google/gce_pd.py b/cloud/google/gce_pd.py index ddfe711304e..1847f0eeb93 100644 --- a/cloud/google/gce_pd.py +++ b/cloud/google/gce_pd.py @@ -108,6 +108,14 @@ options: required: false default: null aliases: [] + disk_type: + version_added: "1.9" + description: + - type of disk provisioned + required: false + default: "pd-standard" + choices: ["pd-standard", "pd-ssd"] + aliases: [] requirements: [ "libcloud" ] author: Eric Johnson @@ -144,6 +152,7 @@ def main(): mode = dict(default='READ_ONLY', choices=['READ_WRITE', 'READ_ONLY']), name = dict(required=True), size_gb = dict(default=10), + disk_type = dict(default='pd-standard'), image = dict(), 
snapshot = dict(), state = dict(default='present'), @@ -161,6 +170,7 @@ def main(): mode = module.params.get('mode') name = module.params.get('name') size_gb = module.params.get('size_gb') + disk_type = module.params.get('disk_type') image = module.params.get('image') snapshot = module.params.get('snapshot') state = module.params.get('state') @@ -174,7 +184,7 @@ def main(): disk = inst = None changed = is_attached = False - json_output = { 'name': name, 'zone': zone, 'state': state } + json_output = { 'name': name, 'zone': zone, 'state': state, 'disk_type': disk_type } if detach_only: json_output['detach_only'] = True json_output['detached_from_instance'] = instance_name @@ -233,7 +243,7 @@ def main(): try: disk = gce.create_volume( size_gb, name, location=zone, image=lc_image, - snapshot=lc_snapshot) + snapshot=lc_snapshot, ex_disk_type=disk_type) except ResourceExistsError: pass except QuotaExceededError: diff --git a/cloud/openstack/glance_image.py b/cloud/openstack/_glance_image.py similarity index 99% rename from cloud/openstack/glance_image.py rename to cloud/openstack/_glance_image.py index 6425fa2ca5d..947b984a761 100644 --- a/cloud/openstack/glance_image.py +++ b/cloud/openstack/_glance_image.py @@ -20,6 +20,7 @@ DOCUMENTATION = ''' --- module: glance_image version_added: "1.2" +deprecated: Deprecated in 1.9. Use os_image instead short_description: Add/Delete images from glance description: - Add or Remove images from the glance repository. diff --git a/cloud/openstack/keystone_user.py b/cloud/openstack/_keystone_user.py similarity index 99% rename from cloud/openstack/keystone_user.py rename to cloud/openstack/_keystone_user.py index 4af254bfe6d..9bc5cc9520f 100644 --- a/cloud/openstack/keystone_user.py +++ b/cloud/openstack/_keystone_user.py @@ -7,6 +7,7 @@ DOCUMENTATION = ''' --- module: keystone_user version_added: "1.2" +deprecated: Deprecated in 1.9. Use os_keystone_user instead short_description: Manage OpenStack Identity (keystone) users, tenants and roles description: - Manage users,tenants, roles from OpenStack. diff --git a/cloud/openstack/nova_compute.py b/cloud/openstack/_nova_compute.py similarity index 96% rename from cloud/openstack/nova_compute.py rename to cloud/openstack/_nova_compute.py index b51a1891a7d..0b911e7659f 100644 --- a/cloud/openstack/nova_compute.py +++ b/cloud/openstack/_nova_compute.py @@ -22,7 +22,7 @@ import os try: from novaclient.v1_1 import client as nova_client - from novaclient.v1_1 import floating_ips + from novaclient.v1_1 import floating_ips from novaclient import exceptions from novaclient import utils import time @@ -33,6 +33,7 @@ DOCUMENTATION = ''' --- module: nova_compute version_added: "1.2" +deprecated: Deprecated in 1.9. Use os_server instead short_description: Create/Delete VMs from OpenStack description: - Create or Remove virtual machines from Openstack. @@ -168,6 +169,12 @@ options: required: false default: None version_added: "1.6" + scheduler_hints: + description: + - Arbitrary key/value pairs to the scheduler for custom use + required: false + default: None + version_added: "1.9" requirements: ["novaclient"] ''' @@ -294,15 +301,15 @@ def _add_floating_ip_from_pool(module, nova, server): # instantiate FloatingIPManager object floating_ip_obj = floating_ips.FloatingIPManager(nova) - # empty dict and list - usable_floating_ips = {} + # empty dict and list + usable_floating_ips = {} pools = [] # user specified pools = module.params['floating_ip_pools'] - # get the list of all floating IPs. 
Mileage may - # vary according to Nova Compute configuration + # get the list of all floating IPs. Mileage may + # vary according to Nova Compute configuration # per cloud provider all_floating_ips = floating_ip_obj.list() @@ -324,7 +331,7 @@ def _add_floating_ip_from_pool(module, nova, server): try: new_ip = nova.floating_ips.create(pool) except Exception, e: - module.fail_json(msg = "Unable to create floating ip") + module.fail_json(msg = "Unable to create floating ip: %s" % (e.message)) pool_ips.append(new_ip.ip) # Add to the main list usable_floating_ips[pool] = pool_ips @@ -356,7 +363,7 @@ def _add_auto_floating_ip(module, nova, server): try: new_ip = nova.floating_ips.create() except Exception as e: - module.fail_json(msg = "Unable to create floating ip: %s" % (e.message)) + module.fail_json(msg = "Unable to create floating ip: %s" % (e)) try: server.add_floating_ip(new_ip) @@ -378,9 +385,9 @@ def _add_floating_ip(module, nova, server): else: return server - # this may look redundant, but if there is now a + # this may look redundant, but if there is now a # floating IP, then it needs to be obtained from - # a recent server object if the above code path exec'd + # a recent server object if the above code path exec'd try: server = nova.servers.get(server.id) except Exception, e: @@ -422,7 +429,7 @@ def _create_server(module, nova): 'config_drive': module.params['config_drive'], } - for optional_param in ('region_name', 'key_name', 'availability_zone'): + for optional_param in ('region_name', 'key_name', 'availability_zone', 'scheduler_hints'): if module.params[optional_param]: bootkwargs[optional_param] = module.params[optional_param] try: @@ -443,7 +450,7 @@ def _create_server(module, nova): private = openstack_find_nova_addresses(getattr(server, 'addresses'), 'fixed', 'private') public = openstack_find_nova_addresses(getattr(server, 'addresses'), 'floating', 'public') - # now exit with info + # now exit with info module.exit_json(changed = True, id = server.id, private_ip=''.join(private), public_ip=''.join(public), status = server.status, info = server._info) if server.status == 'ERROR': @@ -543,6 +550,7 @@ def main(): auto_floating_ip = dict(default=False, type='bool'), floating_ips = dict(default=None), floating_ip_pools = dict(default=None), + scheduler_hints = dict(default=None), )) module = AnsibleModule( argument_spec=argument_spec, @@ -582,4 +590,3 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.openstack import * main() - diff --git a/cloud/openstack/nova_keypair.py b/cloud/openstack/_nova_keypair.py similarity index 98% rename from cloud/openstack/nova_keypair.py rename to cloud/openstack/_nova_keypair.py index c7c9affb3e6..14d3f24259e 100644 --- a/cloud/openstack/nova_keypair.py +++ b/cloud/openstack/_nova_keypair.py @@ -28,6 +28,7 @@ DOCUMENTATION = ''' --- module: nova_keypair version_added: "1.2" +deprecated: Deprecated in 1.9. Use os_keypair instead short_description: Add/Delete key pair from nova description: - Add or Remove key pair from nova . diff --git a/cloud/openstack/quantum_floating_ip.py b/cloud/openstack/_quantum_floating_ip.py similarity index 99% rename from cloud/openstack/quantum_floating_ip.py rename to cloud/openstack/_quantum_floating_ip.py index 17f78effffd..821e8b608b4 100644 --- a/cloud/openstack/quantum_floating_ip.py +++ b/cloud/openstack/_quantum_floating_ip.py @@ -31,6 +31,7 @@ DOCUMENTATION = ''' --- module: quantum_floating_ip version_added: "1.2" +deprecated: Deprecated in 1.9. 
Use os_floating_ip instead short_description: Add/Remove floating IP from an instance description: - Add or Remove a floating IP to an instance diff --git a/cloud/openstack/quantum_floating_ip_associate.py b/cloud/openstack/_quantum_floating_ip_associate.py similarity index 99% rename from cloud/openstack/quantum_floating_ip_associate.py rename to cloud/openstack/_quantum_floating_ip_associate.py index 91df2690b62..88720529d65 100644 --- a/cloud/openstack/quantum_floating_ip_associate.py +++ b/cloud/openstack/_quantum_floating_ip_associate.py @@ -31,6 +31,7 @@ DOCUMENTATION = ''' --- module: quantum_floating_ip_associate version_added: "1.2" +deprecated: Deprecated in 1.9. Use os_floating_ip instead short_description: Associate or disassociate a particular floating IP with an instance description: - Associates or disassociates a specific floating IP with a particular instance diff --git a/cloud/openstack/quantum_network.py b/cloud/openstack/_quantum_network.py similarity index 99% rename from cloud/openstack/quantum_network.py rename to cloud/openstack/_quantum_network.py index 6b0c66e7a12..7c66af07295 100644 --- a/cloud/openstack/quantum_network.py +++ b/cloud/openstack/_quantum_network.py @@ -29,6 +29,7 @@ DOCUMENTATION = ''' --- module: quantum_network version_added: "1.4" +deprecated: Deprecated in 1.9. Use os_network instead short_description: Creates/Removes networks from OpenStack description: - Add or Remove network from OpenStack. diff --git a/cloud/openstack/quantum_router.py b/cloud/openstack/_quantum_router.py similarity index 99% rename from cloud/openstack/quantum_router.py rename to cloud/openstack/_quantum_router.py index 38d479128f2..154eff84d90 100644 --- a/cloud/openstack/quantum_router.py +++ b/cloud/openstack/_quantum_router.py @@ -30,6 +30,7 @@ DOCUMENTATION = ''' module: quantum_router version_added: "1.2" short_description: Create or Remove router from openstack +deprecated: Deprecated in 1.9. Use os_router instead description: - Create or Delete routers from OpenStack options: diff --git a/cloud/openstack/quantum_router_gateway.py b/cloud/openstack/_quantum_router_gateway.py similarity index 99% rename from cloud/openstack/quantum_router_gateway.py rename to cloud/openstack/_quantum_router_gateway.py index 5de19fd4785..cbf6a841c5c 100644 --- a/cloud/openstack/quantum_router_gateway.py +++ b/cloud/openstack/_quantum_router_gateway.py @@ -28,6 +28,7 @@ DOCUMENTATION = ''' --- module: quantum_router_gateway version_added: "1.2" +deprecated: Deprecated in 1.9. Use os_router_gateway instead short_description: set/unset a gateway interface for the router with the specified external network description: - Creates/Removes a gateway interface from the router, used to associate a external network with a router to route external traffic. diff --git a/cloud/openstack/quantum_router_interface.py b/cloud/openstack/_quantum_router_interface.py similarity index 99% rename from cloud/openstack/quantum_router_interface.py rename to cloud/openstack/_quantum_router_interface.py index c5828ad4106..3e758d7d920 100644 --- a/cloud/openstack/quantum_router_interface.py +++ b/cloud/openstack/_quantum_router_interface.py @@ -27,6 +27,7 @@ except ImportError: DOCUMENTATION = ''' --- module: quantum_router_interface +deprecated: Deprecated in 1.9. 
Use os_router_interface instead version_added: "1.2" short_description: Attach/Dettach a subnet's interface to a router description: diff --git a/cloud/openstack/quantum_subnet.py b/cloud/openstack/_quantum_subnet.py similarity index 99% rename from cloud/openstack/quantum_subnet.py rename to cloud/openstack/_quantum_subnet.py index e38b2c94aa6..2d485c15962 100644 --- a/cloud/openstack/quantum_subnet.py +++ b/cloud/openstack/_quantum_subnet.py @@ -29,6 +29,7 @@ DOCUMENTATION = ''' --- module: quantum_subnet version_added: "1.2" +deprecated: Deprecated in 1.9. Use os_subnet instead short_description: Add/remove subnet from a network description: - Add/remove subnet from a network diff --git a/cloud/rackspace/rax.py b/cloud/rackspace/rax.py index 5fa1b57386a..874274c22f3 100644 --- a/cloud/rackspace/rax.py +++ b/cloud/rackspace/rax.py @@ -35,6 +35,34 @@ options: - "yes" - "no" version_added: 1.5 + boot_from_volume: + description: + - Whether or not to boot the instance from a Cloud Block Storage volume. + If C(yes) and I(image) is specified a new volume will be created at + boot time. I(boot_volume_size) is required with I(image) to create a + new volume at boot time. + default: "no" + choices: + - "yes" + - "no" + version_added: 1.9 + boot_volume: + description: + - Cloud Block Storage ID or Name to use as the boot volume of the + instance + version_added: 1.9 + boot_volume_size: + description: + - Size of the volume to create in Gigabytes. This is only required with + I(image) and I(boot_from_volume). + default: 100 + version_added: 1.9 + boot_volume_terminate: + description: + - Whether the I(boot_volume) or newly created volume from I(image) will + be terminated when the server is terminated + default: false + version_added: 1.9 config_drive: description: - Attach read-only configuration drive to server as label config-2 @@ -99,7 +127,9 @@ options: version_added: 1.4 image: description: - - image to use for the instance. Can be an C(id), C(human_id) or C(name) + - image to use for the instance. Can be an C(id), C(human_id) or C(name). 
+ With I(boot_from_volume), a Cloud Block Storage volume will be created + with this image default: null instance_ids: description: @@ -210,10 +240,43 @@ except ImportError: HAS_PYRAX = False +def rax_find_server_image(module, server, image, boot_volume): + if not image and boot_volume: + vol = rax_find_bootable_volume(module, pyrax, server, + exit=False) + if not vol: + return None + volume_image_metadata = vol.volume_image_metadata + vol_image_id = volume_image_metadata.get('image_id') + if vol_image_id: + server_image = rax_find_image(module, pyrax, + vol_image_id, exit=False) + if server_image: + server.image = dict(id=server_image) + + # Match image IDs taking care of boot from volume + if image and not server.image: + vol = rax_find_bootable_volume(module, pyrax, server) + volume_image_metadata = vol.volume_image_metadata + vol_image_id = volume_image_metadata.get('image_id') + if not vol_image_id: + return None + server_image = rax_find_image(module, pyrax, + vol_image_id, exit=False) + if image != server_image: + return None + + server.image = dict(id=server_image) + elif image and server.image['id'] != image: + return None + + return server.image + + def create(module, names=[], flavor=None, image=None, meta={}, key_name=None, files={}, wait=True, wait_timeout=300, disk_config=None, group=None, nics=[], extra_create_args={}, user_data=None, - config_drive=False, existing=[]): + config_drive=False, existing=[], block_device_mapping_v2=[]): cs = pyrax.cloudservers changed = False @@ -239,6 +302,7 @@ def create(module, names=[], flavor=None, image=None, meta={}, key_name=None, module.fail_json(msg='Failed to load %s' % lpath) try: servers = [] + bdmv2 = block_device_mapping_v2 for name in names: servers.append(cs.servers.create(name=name, image=image, flavor=flavor, meta=meta, @@ -247,9 +311,14 @@ def create(module, names=[], flavor=None, image=None, meta={}, key_name=None, disk_config=disk_config, config_drive=config_drive, userdata=user_data, + block_device_mapping_v2=bdmv2, **extra_create_args)) except Exception, e: - module.fail_json(msg='%s' % e.message) + if e.message: + msg = str(e.message) + else: + msg = repr(e) + module.fail_json(msg=msg) else: changed = True @@ -394,7 +463,9 @@ def cloudservers(module, state=None, name=None, flavor=None, image=None, disk_config=None, count=1, group=None, instance_ids=[], exact_count=False, networks=[], count_offset=0, auto_increment=False, extra_create_args={}, user_data=None, - config_drive=False): + config_drive=False, boot_from_volume=False, + boot_volume=None, boot_volume_size=None, + boot_volume_terminate=False): cs = pyrax.cloudservers cnw = pyrax.cloud_networks if not cnw: @@ -402,6 +473,26 @@ def cloudservers(module, state=None, name=None, flavor=None, image=None, 'typically indicates an invalid region or an ' 'incorrectly capitalized region name.') + if state == 'present' or (state == 'absent' and instance_ids is None): + if not boot_from_volume and not boot_volume and not image: + module.fail_json(msg='image is required for the "rax" module') + + for arg, value in dict(name=name, flavor=flavor).iteritems(): + if not value: + module.fail_json(msg='%s is required for the "rax" module' % + arg) + + if boot_from_volume and not image and not boot_volume: + module.fail_json(msg='image or boot_volume are required for the ' + '"rax" with boot_from_volume') + + if boot_from_volume and image and not boot_volume_size: + module.fail_json(msg='boot_volume_size is required for the "rax" ' + 'module with boot_from_volume and image') + + if 
boot_from_volume and image and boot_volume: + image = None + servers = [] # Add the group meta key @@ -438,12 +529,6 @@ def cloudservers(module, state=None, name=None, flavor=None, image=None, # act on the state if state == 'present': - for arg, value in dict(name=name, flavor=flavor, - image=image).iteritems(): - if not value: - module.fail_json(msg='%s is required for the "rax" module' % - arg) - # Idempotent ensurance of a specific count of servers if exact_count is not False: # See if we can find servers that match our options @@ -583,7 +668,6 @@ def cloudservers(module, state=None, name=None, flavor=None, image=None, # Perform more simplistic matching search_opts = { 'name': '^%s$' % name, - 'image': image, 'flavor': flavor } servers = [] @@ -591,6 +675,11 @@ def cloudservers(module, state=None, name=None, flavor=None, image=None, # Ignore DELETED servers if server.status == 'DELETED': continue + + if not rax_find_server_image(module, server, image, + boot_volume): + continue + # Ignore servers with non matching metadata if server.metadata != meta: continue @@ -616,34 +705,57 @@ def cloudservers(module, state=None, name=None, flavor=None, image=None, # them, we aren't performing auto_increment here names = [name] * (count - len(servers)) + block_device_mapping_v2 = [] + if boot_from_volume: + mapping = { + 'boot_index': '0', + 'delete_on_termination': boot_volume_terminate, + 'destination_type': 'volume', + } + if image: + mapping.update({ + 'uuid': image, + 'source_type': 'image', + 'volume_size': boot_volume_size, + }) + image = None + elif boot_volume: + volume = rax_find_volume(module, pyrax, boot_volume) + mapping.update({ + 'uuid': pyrax.utils.get_id(volume), + 'source_type': 'volume', + }) + block_device_mapping_v2.append(mapping) + create(module, names=names, flavor=flavor, image=image, meta=meta, key_name=key_name, files=files, wait=wait, wait_timeout=wait_timeout, disk_config=disk_config, group=group, nics=nics, extra_create_args=extra_create_args, user_data=user_data, config_drive=config_drive, - existing=servers) + existing=servers, + block_device_mapping_v2=block_device_mapping_v2) elif state == 'absent': if instance_ids is None: # We weren't given an explicit list of server IDs to delete # Let's match instead - for arg, value in dict(name=name, flavor=flavor, - image=image).iteritems(): - if not value: - module.fail_json(msg='%s is required for the "rax" ' - 'module' % arg) search_opts = { 'name': '^%s$' % name, - 'image': image, 'flavor': flavor } for server in cs.servers.list(search_opts=search_opts): # Ignore DELETED servers if server.status == 'DELETED': continue + + if not rax_find_server_image(module, server, image, + boot_volume): + continue + # Ignore servers with non matching metadata if meta != server.metadata: continue + servers.append(server) # Build a list of server IDs to delete @@ -672,6 +784,10 @@ def main(): argument_spec.update( dict( auto_increment=dict(default=True, type='bool'), + boot_from_volume=dict(default=False, type='bool'), + boot_volume=dict(type='str'), + boot_volume_size=dict(type='int', default=100), + boot_volume_terminate=dict(type='bool', default=False), config_drive=dict(default=False, type='bool'), count=dict(default=1, type='int'), count_offset=dict(default=1, type='int'), @@ -712,6 +828,10 @@ def main(): 'playbook pertaining to the "rax" module') auto_increment = module.params.get('auto_increment') + boot_from_volume = module.params.get('boot_from_volume') + boot_volume = module.params.get('boot_volume') + boot_volume_size = 
module.params.get('boot_volume_size') + boot_volume_terminate = module.params.get('boot_volume_terminate') config_drive = module.params.get('config_drive') count = module.params.get('count') count_offset = module.params.get('count_offset') @@ -757,7 +877,9 @@ def main(): exact_count=exact_count, networks=networks, count_offset=count_offset, auto_increment=auto_increment, extra_create_args=extra_create_args, user_data=user_data, - config_drive=config_drive) + config_drive=config_drive, boot_from_volume=boot_from_volume, + boot_volume=boot_volume, boot_volume_size=boot_volume_size, + boot_volume_terminate=boot_volume_terminate) # import module snippets diff --git a/cloud/rackspace/rax_cbs.py b/cloud/rackspace/rax_cbs.py index a1b6ce46a6e..6f922f0128e 100644 --- a/cloud/rackspace/rax_cbs.py +++ b/cloud/rackspace/rax_cbs.py @@ -28,6 +28,12 @@ options: description: - Description to give the volume being created default: null + image: + description: + - image to use for bootable volumes. Can be an C(id), C(human_id) or + C(name). This option requires C(pyrax>=1.9.3) + default: null + version_added: 1.9 meta: description: - A hash of metadata to associate with the volume @@ -99,6 +105,8 @@ EXAMPLES = ''' register: my_volume ''' +from distutils.version import LooseVersion + try: import pyrax HAS_PYRAX = True @@ -107,14 +115,8 @@ except ImportError: def cloud_block_storage(module, state, name, description, meta, size, - snapshot_id, volume_type, wait, wait_timeout): - for arg in (state, name, size, volume_type): - if not arg: - module.fail_json(msg='%s is required for rax_cbs' % arg) - - if size < 100: - module.fail_json(msg='"size" must be greater than or equal to 100') - + snapshot_id, volume_type, wait, wait_timeout, + image): changed = False volume = None instance = {} @@ -126,15 +128,26 @@ def cloud_block_storage(module, state, name, description, meta, size, 'typically indicates an invalid region or an ' 'incorrectly capitalized region name.') + if image: + # pyrax<1.9.3 did not have support for specifying an image when + # creating a volume which is required for bootable volumes + if LooseVersion(pyrax.version.version) < LooseVersion('1.9.3'): + module.fail_json(msg='Creating a bootable volume requires ' + 'pyrax>=1.9.3') + image = rax_find_image(module, pyrax, image) + volume = rax_find_volume(module, pyrax, name) if state == 'present': if not volume: + kwargs = dict() + if image: + kwargs['image'] = image try: volume = cbs.create(name, size=size, volume_type=volume_type, description=description, metadata=meta, - snapshot_id=snapshot_id) + snapshot_id=snapshot_id, **kwargs) changed = True except Exception, e: module.fail_json(msg='%s' % e.message) @@ -145,10 +158,7 @@ def cloud_block_storage(module, state, name, description, meta, size, attempts=attempts) volume.get() - for key, value in vars(volume).iteritems(): - if (isinstance(value, NON_CALLABLES) and - not key.startswith('_')): - instance[key] = value + instance = rax_to_dict(volume) result = dict(changed=changed, volume=instance) @@ -164,6 +174,7 @@ def cloud_block_storage(module, state, name, description, meta, size, elif state == 'absent': if volume: + instance = rax_to_dict(volume) try: volume.delete() changed = True @@ -177,7 +188,8 @@ def main(): argument_spec = rax_argument_spec() argument_spec.update( dict( - description=dict(), + description=dict(type='str'), + image=dict(type='str'), meta=dict(type='dict', default={}), name=dict(required=True), size=dict(type='int', default=100), @@ -198,6 +210,7 @@ def main(): 
module.fail_json(msg='pyrax is required for this module') description = module.params.get('description') + image = module.params.get('image') meta = module.params.get('meta') name = module.params.get('name') size = module.params.get('size') @@ -210,11 +223,12 @@ def main(): setup_rax_module(module, pyrax) cloud_block_storage(module, state, name, description, meta, size, - snapshot_id, volume_type, wait, wait_timeout) + snapshot_id, volume_type, wait, wait_timeout, + image) # import module snippets from ansible.module_utils.basic import * from ansible.module_utils.rax import * -### invoke the module +# invoke the module main() diff --git a/cloud/rackspace/rax_cbs_attachments.py b/cloud/rackspace/rax_cbs_attachments.py index 365f93cd6e2..870b8e611df 100644 --- a/cloud/rackspace/rax_cbs_attachments.py +++ b/cloud/rackspace/rax_cbs_attachments.py @@ -90,11 +90,6 @@ except ImportError: def cloud_block_storage_attachments(module, state, volume, server, device, wait, wait_timeout): - for arg in (state, volume, server, device): - if not arg: - module.fail_json(msg='%s is required for rax_cbs_attachments' % - arg) - cbs = pyrax.cloud_blockstorage cs = pyrax.cloudservers @@ -133,7 +128,7 @@ def cloud_block_storage_attachments(module, state, volume, server, device, not key.startswith('_')): instance[key] = value - result = dict(changed=changed, volume=instance) + result = dict(changed=changed) if volume.status == 'error': result['msg'] = '%s failed to build' % volume.id @@ -142,6 +137,9 @@ def cloud_block_storage_attachments(module, state, volume, server, device, pyrax.utils.wait_until(volume, 'status', 'in-use', interval=5, attempts=attempts) + volume.get() + result['volume'] = rax_to_dict(volume) + if 'msg' in result: module.fail_json(**result) else: @@ -167,12 +165,7 @@ def cloud_block_storage_attachments(module, state, volume, server, device, elif volume.attachments: module.fail_json(msg='Volume is attached to another server') - for key, value in vars(volume).iteritems(): - if (isinstance(value, NON_CALLABLES) and - not key.startswith('_')): - instance[key] = value - - result = dict(changed=changed, volume=instance) + result = dict(changed=changed, volume=rax_to_dict(volume)) if volume.status == 'error': result['msg'] = '%s failed to build' % volume.id diff --git a/cloud/rackspace/rax_cdb_database.py b/cloud/rackspace/rax_cdb_database.py index 421b6dcb094..cc7885ee31e 100644 --- a/cloud/rackspace/rax_cdb_database.py +++ b/cloud/rackspace/rax_cdb_database.py @@ -79,12 +79,6 @@ def find_database(instance, name): def save_database(module, cdb_id, name, character_set, collate): - - for arg, value in dict(cdb_id=cdb_id, name=name).iteritems(): - if not value: - module.fail_json(msg='%s is required for the "rax_cdb_database" ' - 'module' % arg) - cdb = pyrax.cloud_databases try: @@ -111,12 +105,6 @@ def save_database(module, cdb_id, name, character_set, collate): def delete_database(module, cdb_id, name): - - for arg, value in dict(cdb_id=cdb_id, name=name).iteritems(): - if not value: - module.fail_json(msg='%s is required for the "rax_cdb_database" ' - 'module' % arg) - cdb = pyrax.cloud_databases try: @@ -136,7 +124,8 @@ def delete_database(module, cdb_id, name): else: changed = True - module.exit_json(changed=changed, action='delete') + module.exit_json(changed=changed, action='delete', + database=rax_to_dict(database)) def rax_cdb_database(module, state, cdb_id, name, character_set, collate): diff --git a/cloud/rackspace/rax_clb.py b/cloud/rackspace/rax_clb.py index 7a2699709da..a3deae6f4a7 100644 
--- a/cloud/rackspace/rax_clb.py +++ b/cloud/rackspace/rax_clb.py @@ -140,10 +140,6 @@ except ImportError: def cloud_load_balancer(module, state, name, meta, algorithm, port, protocol, vip_type, timeout, wait, wait_timeout, vip_id): - for arg in (state, name, port, protocol, vip_type): - if not arg: - module.fail_json(msg='%s is required for rax_clb' % arg) - if int(timeout) < 30: module.fail_json(msg='"timeout" must be greater than or equal to 30') @@ -156,7 +152,14 @@ def cloud_load_balancer(module, state, name, meta, algorithm, port, protocol, 'typically indicates an invalid region or an ' 'incorrectly capitalized region name.') - for balancer in clb.list(): + balancer_list = clb.list() + while balancer_list: + retrieved = clb.list(marker=balancer_list.pop().id) + balancer_list.extend(retrieved) + if len(retrieved) < 2: + break + + for balancer in balancer_list: if name != balancer.name and name != balancer.id: continue @@ -257,7 +260,7 @@ def main(): algorithm=dict(choices=CLB_ALGORITHMS, default='LEAST_CONNECTIONS'), meta=dict(type='dict', default={}), - name=dict(), + name=dict(required=True), port=dict(type='int', default=80), protocol=dict(choices=CLB_PROTOCOLS, default='HTTP'), state=dict(default='present', choices=['present', 'absent']), diff --git a/cloud/rackspace/rax_clb_nodes.py b/cloud/rackspace/rax_clb_nodes.py index 24325b44597..472fad19b1c 100644 --- a/cloud/rackspace/rax_clb_nodes.py +++ b/cloud/rackspace/rax_clb_nodes.py @@ -150,21 +150,6 @@ def _get_node(lb, node_id=None, address=None, port=None): return None -def _is_primary(node): - """Return True if node is primary and enabled""" - return (node.type.lower() == 'primary' and - node.condition.lower() == 'enabled') - - -def _get_primary_nodes(lb): - """Return a list of primary and enabled nodes""" - nodes = [] - for node in lb.nodes: - if _is_primary(node): - nodes.append(node) - return nodes - - def main(): argument_spec = rax_argument_spec() argument_spec.update( @@ -230,13 +215,6 @@ def main(): if state == 'absent': if not node: # Removing a non-existent node module.exit_json(changed=False, state=state) - - # The API detects this as well but currently pyrax does not return a - # meaningful error message - if _is_primary(node) and len(_get_primary_nodes(lb)) == 1: - module.fail_json( - msg='At least one primary node has to be enabled') - try: lb.delete_node(node) result = {} @@ -299,5 +277,5 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.rax import * -### invoke the module +# invoke the module main() diff --git a/cloud/rackspace/rax_identity.py b/cloud/rackspace/rax_identity.py index ea40ea2ef46..47b4cb60cf0 100644 --- a/cloud/rackspace/rax_identity.py +++ b/cloud/rackspace/rax_identity.py @@ -55,10 +55,6 @@ except ImportError: def cloud_identity(module, state, identity): - for arg in (state, identity): - if not arg: - module.fail_json(msg='%s is required for rax_identity' % arg) - instance = dict( authenticated=identity.authenticated, credentials=identity._creds_file @@ -79,7 +75,7 @@ def main(): argument_spec = rax_argument_spec() argument_spec.update( dict( - state=dict(default='present', choices=['present', 'absent']) + state=dict(default='present', choices=['present']) ) ) @@ -95,7 +91,7 @@ def main(): setup_rax_module(module, pyrax) - if pyrax.identity is None: + if not pyrax.identity: module.fail_json(msg='Failed to instantiate client. 
This ' 'typically indicates an invalid region or an ' 'incorrectly capitalized region name.') @@ -106,5 +102,5 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.rax import * -### invoke the module +# invoke the module main() diff --git a/cloud/rackspace/rax_keypair.py b/cloud/rackspace/rax_keypair.py index 591ad8c3597..8f38abc12e0 100644 --- a/cloud/rackspace/rax_keypair.py +++ b/cloud/rackspace/rax_keypair.py @@ -104,7 +104,7 @@ def rax_keypair(module, name, public_key, state): keypair = {} if state == 'present': - if os.path.isfile(public_key): + if public_key and os.path.isfile(public_key): try: f = open(public_key) public_key = f.read() @@ -143,7 +143,7 @@ def main(): argument_spec = rax_argument_spec() argument_spec.update( dict( - name=dict(), + name=dict(required=True), public_key=dict(), state=dict(default='present', choices=['absent', 'present']), ) diff --git a/cloud/rackspace/rax_network.py b/cloud/rackspace/rax_network.py index bc4745a7a84..bd23f5f878d 100644 --- a/cloud/rackspace/rax_network.py +++ b/cloud/rackspace/rax_network.py @@ -65,10 +65,6 @@ except ImportError: def cloud_network(module, state, label, cidr): - for arg in (state, label, cidr): - if not arg: - module.fail_json(msg='%s is required for cloud_networks' % arg) - changed = False network = None networks = [] @@ -79,6 +75,9 @@ def cloud_network(module, state, label, cidr): 'incorrectly capitalized region name.') if state == 'present': + if not cidr: + module.fail_json(msg='missing required arguments: cidr') + try: network = pyrax.cloud_networks.find_network_by_label(label) except pyrax.exceptions.NetworkNotFound: @@ -115,7 +114,7 @@ def main(): dict( state=dict(default='present', choices=['present', 'absent']), - label=dict(), + label=dict(required=True), cidr=dict() ) ) diff --git a/cloud/vmware/vsphere_guest.py b/cloud/vmware/vsphere_guest.py index 86cc9f00fa7..8ad7df41dea 100644 --- a/cloud/vmware/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -69,11 +69,13 @@ options: default: present choices: ['present', 'powered_off', 'absent', 'powered_on', 'restarted', 'reconfigured'] from_template: + version_added: "1.9" description: - Specifies if the VM should be deployed from a template (cannot be ran with state) default: no choices: ['yes', 'no'] template_src: + version_added: "1.9" description: - Name of the source template to deploy from default: None @@ -684,7 +686,7 @@ def create_vm(vsphere_client, module, esxi, resource_pool, cluster_name, guest, hfmor = dcprops.hostFolder._obj # virtualmachineFolder managed object reference - if vm_extra_config['folder']: + if vm_extra_config.get('folder'): if vm_extra_config['folder'] not in vsphere_client._get_managed_objects(MORTypes.Folder).values(): vsphere_client.disconnect() module.fail_json(msg="Cannot find folder named: %s" % vm_extra_config['folder']) diff --git a/commands/command.py b/commands/command.py index c584d6feed8..131fc4c7ffc 100644 --- a/commands/command.py +++ b/commands/command.py @@ -18,6 +18,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . +import copy import sys import datetime import traceback @@ -99,12 +100,22 @@ EXAMPLES = ''' creates: /path/to/database ''' +# Dict of options and their defaults +OPTIONS = {'chdir': None, + 'creates': None, + 'executable': None, + 'NO_LOG': None, + 'removes': None, + 'warn': True, + } + # This is a pretty complex regex, which functions as follows: # # 1. (^|\s) # ^ look for a space or the beginning of the line -# 2. 
(creates|removes|chdir|executable|NO_LOG)=
-#    ^ look for a valid param, followed by an '='
+# 2. ({options_list})=
+#    ^ expanded to (chdir|creates|executable...)=
+#      look for a valid param, followed by an '='
 # 3. (?P<quote>[\'"])?
 #    ^ look for an optional quote character, which can either be
 #      a single or double quote character, and store it for later
@@ -114,8 +125,11 @@ EXAMPLES = '''
 #    ^ a non-escaped space or a non-escaped quote of the same kind
 #      that was matched in the first 'quote' is found, or the end of
 #      the line is reached
-
-PARAM_REGEX = re.compile(r'(^|\s)(creates|removes|chdir|executable|NO_LOG|warn)=(?P<quote>[\'"])?(.*?)(?(quote)(?<!\\)(?P=quote))((?<!\\)(?=\s)|$)')
+PARAM_REGEX = re.compile(r'(^|\s)({options_list})=(?P<quote>[\'"])?(.*?)(?(quote)(?<!\\)(?P=quote))((?<!\\)(?=\s)|$)'.format(options_list='|'.join(OPTIONS)))
diff --git a/database/mysql/mysql_db.py b/database/mysql/mysql_db.py
--- a/database/mysql/mysql_db.py
+++ b/database/mysql/mysql_db.py
@@ -148,42 +149,42 @@ def db_import(module, host, user, password, db_name, target, port, socket=None):
     if socket is not None:
         cmd += " --socket=%s" % pipes.quote(socket)
     else:
-        cmd += " --host=%s --port=%s" % (pipes.quote(host), pipes.quote(port))
+        cmd += " --host=%s --port=%i" % (pipes.quote(host), port)
     cmd += " -D %s" % pipes.quote(db_name)
     if os.path.splitext(target)[-1] == '.gz':
-        gunzip_path = module.get_bin_path('gunzip')
-        if gunzip_path:
-            rc, stdout, stderr = module.run_command('%s %s' % (gunzip_path, target))
-            if rc != 0:
-                return rc, stdout, stderr
-            cmd += " < %s" % pipes.quote(os.path.splitext(target)[0])
+        gzip_path = module.get_bin_path('gzip')
+        if not gzip_path:
+            module.fail_json(msg="gzip command not found")
+        #gzip -d file (uncompress)
+        rc, stdout, stderr = module.run_command('%s -d %s' % (gzip_path, target))
+        if rc != 0:
+            return rc, stdout, stderr
+        #Import sql
+        cmd += " < %s" % pipes.quote(os.path.splitext(target)[0])
+        try:
             rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True)
             if rc != 0:
                 return rc, stdout, stderr
-            gzip_path = module.get_bin_path('gzip')
-            if gzip_path:
-                rc, stdout, stderr = module.run_command('%s %s' % (gzip_path, os.path.splitext(target)[0]))
-            else:
-                module.fail_json(msg="gzip command not found")
-        else:
-            module.fail_json(msg="gunzip command not found")
+        finally:
+            #gzip file back up
+            module.run_command('%s %s' % (gzip_path, os.path.splitext(target)[0]))
     elif os.path.splitext(target)[-1] == '.bz2':
-        bunzip2_path = module.get_bin_path('bunzip2')
-        if bunzip2_path:
-            rc, stdout, stderr = module.run_command('%s %s' % (bunzip2_path, target))
-            if rc != 0:
-                return rc, stdout, stderr
-            cmd += " < %s" % pipes.quote(os.path.splitext(target)[0])
+        bzip2_path = module.get_bin_path('bzip2')
+        if not bzip2_path:
+            module.fail_json(msg="bzip2 command not found")
+        #bzip2 -d file (uncompress)
+        rc, stdout, stderr = module.run_command('%s -d %s' % (bzip2_path, target))
+        if rc != 0:
+            return rc, stdout, stderr
+        #Import sql
+        cmd += " < %s" % pipes.quote(os.path.splitext(target)[0])
+        try:
             rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True)
             if rc != 0:
                 return rc, stdout, stderr
-            bzip2_path = module.get_bin_path('bzip2')
-            if bzip2_path:
-                rc, stdout, stderr = module.run_command('%s %s' % (bzip2_path, os.path.splitext(target)[0]))
-            else:
-                module.fail_json(msg="bzip2 command not found")
-        else:
-            module.fail_json(msg="bunzip2 command not found")
+        finally:
+            #bzip2 file back up
+            rc, stdout, stderr = module.run_command('%s %s' % (bzip2_path, os.path.splitext(target)[0]))
     else:
         cmd += " < %s" % pipes.quote(target)
         rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True)
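With the rewritten `db_import` above, a compressed dump is uncompressed in place, imported, and then recompressed in the `finally` block, so nothing changes from the caller's perspective. A sketch of the corresponding task (database name and path are illustrative):

```yaml
# Hypothetical task: import a gzipped dump. The module gunzips
# /tmp/my_db.sql.gz, imports /tmp/my_db.sql, then re-gzips the file.
- mysql_db:
    name: my_db
    state: import
    target: /tmp/my_db.sql.gz
```
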
@@ -265,7 +266,7 @@ def main():
         login_user=dict(default=None),
         login_password=dict(default=None),
         login_host=dict(default="localhost"),
-        login_port=dict(default="3306"),
+        login_port=dict(default=3306, type='int'),
         login_unix_socket=dict(default=None),
         name=dict(required=True, aliases=['db']),
         encoding=dict(default=""),
@@ -283,6 +284,10 @@ def main():
     collation = module.params["collation"]
     state = module.params["state"]
     target = module.params["target"]
+    socket = module.params["login_unix_socket"]
+    login_port = module.params["login_port"]
+    if login_port < 0 or login_port > 65535:
+        module.fail_json(msg="login_port must be a valid port number (0-65535)")

     # make sure the target path is expanded for ~ and $HOME
     if target is not None:
@@ -310,21 +315,27 @@ def main():
             module.fail_json(msg="with state=%s target is required" % (state))
         connect_to_db = db
     else:
-        connect_to_db = 'mysql'
+        connect_to_db = ''
     try:
-        if module.params["login_unix_socket"]:
-            db_connection = MySQLdb.connect(host=module.params["login_host"], unix_socket=module.params["login_unix_socket"], user=login_user, passwd=login_password, db=connect_to_db)
-        elif module.params["login_port"] != "3306" and module.params["login_host"] == "localhost":
+        if socket:
+            try:
+                socketmode = os.stat(socket).st_mode
+                if not stat.S_ISSOCK(socketmode):
+                    module.fail_json(msg="%s is not a socket, unable to connect" % socket)
+            except OSError:
+                module.fail_json(msg="%s does not exist, unable to connect" % socket)
+            db_connection = MySQLdb.connect(host=module.params["login_host"], unix_socket=socket, user=login_user, passwd=login_password, db=connect_to_db)
+        elif login_port != 3306 and module.params["login_host"] == "localhost":
             module.fail_json(msg="login_host is required when login_port is defined, login_host cannot be localhost when login_port is defined")
         else:
-            db_connection = MySQLdb.connect(host=module.params["login_host"], port=int(module.params["login_port"]), user=login_user, passwd=login_password, db=connect_to_db)
+            db_connection = MySQLdb.connect(host=module.params["login_host"], port=login_port, user=login_user, passwd=login_password, db=connect_to_db)
         cursor = db_connection.cursor()
     except Exception, e:
         if "Unknown database" in str(e):
             errno, errstr = e.args
             module.fail_json(msg="ERROR: %s %s" % (errno, errstr))
         else:
-            module.fail_json(msg="unable to connect, check login_user and login_password are correct, or alternatively check ~/.my.cnf contains credentials")
+            module.fail_json(msg="unable to connect, check login credentials (login_user and login_password, which can be set in ~/.my.cnf), and check that the mysql socket exists and the mysql server is running")

     changed = False
     if db_exists(cursor, db):
@@ -336,7 +347,7 @@ def main():
         elif state == "dump":
             rc, stdout, stderr = db_dump(module, login_host, login_user,
                                          login_password, db, target,
-                                         port=module.params['login_port'],
+                                         port=login_port,
                                          socket=module.params['login_unix_socket'])
             if rc != 0:
                 module.fail_json(msg="%s" % stderr)
@@ -345,7 +356,7 @@ def main():
         elif state == "import":
             rc, stdout, stderr = db_import(module, login_host, login_user,
                                            login_password, db, target,
-                                           port=module.params['login_port'],
+                                           port=login_port,
                                            socket=module.params['login_unix_socket'])
             if rc != 0:
                 module.fail_json(msg="%s" % stderr)
diff --git a/database/mysql/mysql_user.py b/database/mysql/mysql_user.py
index e160fcb68f6..5901771f6ad 100644
--- a/database/mysql/mysql_user.py
+++ b/database/mysql/mysql_user.py
@@ -117,6 +117,9 @@ EXAMPLES = """
 # Creates database user 'bob' and password '12345' with all database privileges and 'WITH GRANT OPTION'
 - mysql_user: name=bob password=12345 priv=*.*:ALL,GRANT state=present
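Since `login_port` becomes `type='int'` in both mysql modules in this patch, a port supplied in a play is now validated as an integer rather than interpolated as a string. An illustrative task in the same spirit as the entries above (host, credentials, and port are placeholders):

```yaml
# Hypothetical task: manage a user over a non-default MySQL port.
- mysql_user:
    login_host: db1.example.com
    login_user: root
    login_password: "123456"
    login_port: 3307
    name: readonly
    priv: "reporting.*:SELECT"
    state: present
```
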
+# Modify user 'bob' to require SSL connections. Note that REQUIRESSL is a special privilege that should only apply to *.* by itself.
+- mysql_user: name=bob append_privs=true priv=*.*:REQUIRESSL state=present
+
 # Ensure no user named 'sally' exists, also passing in the auth credentials.
 - mysql_user: login_user=root login_password=123456 name=sally state=absent
@@ -159,7 +162,7 @@ VALID_PRIVS = frozenset(('CREATE', 'DROP', 'GRANT', 'GRANT OPTION',
                          'EXECUTE', 'FILE', 'CREATE TABLESPACE', 'CREATE USER',
                          'PROCESS', 'PROXY', 'RELOAD', 'REPLICATION CLIENT',
                          'REPLICATION SLAVE', 'SHOW DATABASES', 'SHUTDOWN',
-                         'SUPER', 'ALL', 'ALL PRIVILEGES', 'USAGE',))
+                         'SUPER', 'ALL', 'ALL PRIVILEGES', 'USAGE', 'REQUIRESSL'))

 class InvalidPrivsError(Exception):
     pass
@@ -261,6 +264,8 @@ def privileges_get(cursor, user,host):
         privileges = [ pick(x) for x in privileges]
         if "WITH GRANT OPTION" in res.group(4):
             privileges.append('GRANT')
+        if "REQUIRE SSL" in res.group(4):
+            privileges.append('REQUIRESSL')
         db = res.group(2)
         output[db] = privileges
     return output
@@ -294,6 +299,11 @@ def privileges_unpack(priv):
     if '*.*' not in output:
         output['*.*'] = ['USAGE']

+    # if we are only specifying something like REQUIRESSL in *.* we still need
+    # to add USAGE as a privilege to avoid syntax errors
+    if priv.find('REQUIRESSL') != -1 and 'USAGE' not in output['*.*']:
+        output['*.*'].append('USAGE')
+
     return output

 def privileges_revoke(cursor, user,host,db_table,grant_option):
@@ -313,15 +323,16 @@ def privileges_grant(cursor, user,host,db_table,priv):
     # Escape '%' since mysql db.execute uses a format string and the
     # specification of db and table often use a % (SQL wildcard)
     db_table = db_table.replace('%', '%%')
-    priv_string = ",".join(filter(lambda x: x != 'GRANT', priv))
+    priv_string = ",".join(filter(lambda x: x not in [ 'GRANT', 'REQUIRESSL' ], priv))
     query = ["GRANT %s ON %s" % (priv_string, mysql_quote_identifier(db_table, 'table'))]
     query.append("TO %s@%s")
     if 'GRANT' in priv:
         query.append("WITH GRANT OPTION")
+    if 'REQUIRESSL' in priv:
+        query.append("REQUIRE SSL")
     query = ' '.join(query)
     cursor.execute(query, (user, host))

-
 def strip_quotes(s):
     """ Remove surrounding single or double quotes
@@ -413,7 +424,7 @@ def connect(module, login_user, login_password):
     if module.params["login_unix_socket"]:
         db_connection = MySQLdb.connect(host=module.params["login_host"], unix_socket=module.params["login_unix_socket"], user=login_user, passwd=login_password, db="mysql")
     else:
-        db_connection = MySQLdb.connect(host=module.params["login_host"], port=int(module.params["login_port"]), user=login_user, passwd=login_password, db="mysql")
+        db_connection = MySQLdb.connect(host=module.params["login_host"], port=module.params["login_port"], user=login_user, passwd=login_password, db="mysql")
     return db_connection.cursor()

 # ===========================================
@@ -426,7 +437,7 @@ def main():
         login_user=dict(default=None),
         login_password=dict(default=None),
         login_host=dict(default="localhost"),
-        login_port=dict(default="3306"),
+        login_port=dict(default=3306, type='int'),
         login_unix_socket=dict(default=None),
         user=dict(required=True, aliases=['name']),
         password=dict(default=None),
@@ -487,16 +498,14 @@ def main():
         if user_exists(cursor, user, host):
             try:
                 changed = user_mod(cursor, user, host, password, priv, append_privs)
-            except SQLParseError, e:
-                module.fail_json(msg=str(e))
-            except InvalidPrivsError, e:
+            except (SQLParseError, InvalidPrivsError, MySQLdb.Error), e:
                 module.fail_json(msg=str(e))
         else:
             if password is None:
None: module.fail_json(msg="password parameter required when adding a user") try: changed = user_add(cursor, user, host, password, priv) - except SQLParseError, e: + except (SQLParseError, InvalidPrivsError, MySQLdb.Error), e: module.fail_json(msg=str(e)) elif state == "absent": if user_exists(cursor, user, host): diff --git a/database/postgresql/postgresql_db.py b/database/postgresql/postgresql_db.py index 941644d6fb1..4ce8e146ccd 100644 --- a/database/postgresql/postgresql_db.py +++ b/database/postgresql/postgresql_db.py @@ -275,7 +275,7 @@ def main(): kw["host"] = module.params["login_unix_socket"] try: - db_connection = psycopg2.connect(database="template1", **kw) + db_connection = psycopg2.connect(database="postgres", **kw) # Enable autocommit so we can create databases if psycopg2.__version__ >= '2.4.2': db_connection.autocommit = True diff --git a/database/postgresql/postgresql_privs.py b/database/postgresql/postgresql_privs.py index 9b9d94923bc..22a565f6b65 100644 --- a/database/postgresql/postgresql_privs.py +++ b/database/postgresql/postgresql_privs.py @@ -474,10 +474,13 @@ class Connection(object): if obj_type == 'group': set_what = ','.join(pg_quote_identifier(i, 'role') for i in obj_ids) else: + # function types are already quoted above + if obj_type != 'function': + obj_ids = [pg_quote_identifier(i, 'table') for i in obj_ids] # Note: obj_type has been checked against a set of string literals # and privs was escaped when it was parsed set_what = '%s ON %s %s' % (','.join(privs), obj_type, - ','.join(pg_quote_identifier(i, 'table') for i in obj_ids)) + ','.join(obj_ids)) # for_whom: SQL-fragment specifying for whom to set the above if roles == 'PUBLIC': diff --git a/files/acl.py b/files/acl.py index 30c533e006c..0c568ba59a5 100644 --- a/files/acl.py +++ b/files/acl.py @@ -102,6 +102,14 @@ EXAMPLES = ''' register: acl_info ''' +RETURN = ''' +acl: + description: Current acl on provided path (after changes, if any) + returned: success + type: list + sample: [ "user::rwx", "group::rwx", "other::rwx" ] +''' + def normalize_permissions(p): perms = ['-','-','-'] for char in p: @@ -111,6 +119,9 @@ def normalize_permissions(p): perms[1] = 'w' if char == 'x': perms[2] = 'x' + if char == 'X': + if perms[2] != 'x': # 'x' is more permissive + perms[2] = 'X' return ''.join(perms) def split_entry(entry): diff --git a/files/copy.py b/files/copy.py index c5aaa01b5b3..7e1ea1db9c0 100644 --- a/files/copy.py +++ b/files/copy.py @@ -108,6 +108,68 @@ EXAMPLES = ''' - copy: src=/mine/sudoers dest=/etc/sudoers validate='visudo -cf %s' ''' +RETURN = ''' +dest: + description: destination file/path + returned: success + type: string + sample: "/path/to/file.txt" +src: + description: source file used for the copy on the target machine + returned: changed + type: string + sample: "/home/httpd/.ansible/tmp/ansible-tmp-1423796390.97-147729857856000/source" +md5sum: + description: md5 checksum of the file after running copy + returned: when supported + type: string + sample: "2a5aeecc61dc98c4d780b14b330e3282" +checksum: + description: checksum of the file after running copy + returned: success + type: string + sample: "6e642bb8dd5c2e027bf21dd923337cbb4214f827" +backup_file: + description: name of backup file created + returned: changed and if backup=yes + type: string + sample: "/path/to/file.txt.2015-02-12@22:09~" +gid: + description: group id of the file, after execution + returned: success + type: int + sample: 100 +group: + description: group of the file, after execution + returned: success + type:
string + sample: "httpd" +owner: + description: owner of the file, after execution + returned: success + type: string + sample: "httpd" +uid: + description: owner id of the file, after execution + returned: success + type: int + sample: 100 +mode: + description: permissions of the target, after execution + returned: success + type: string + sample: "0644" +size: + description: size of the target, after execution + returned: success + type: int + sample: 1220 +state: + description: state of the target, after execution + returned: success + type: string + sample: "file" +''' def split_pre_existing_dir(dirname): ''' @@ -181,7 +243,7 @@ def main(): if original_basename and dest.endswith("/"): dest = os.path.join(dest, original_basename) dirname = os.path.dirname(dest) - if not os.path.exists(dirname) and '/' in dirname: + if not os.path.exists(dirname) and os.path.isabs(dirname): (pre_existing_dir, new_directory_list) = split_pre_existing_dir(dirname) os.makedirs(dirname) directory_args = module.load_file_common_arguments(module.params) diff --git a/files/fetch.py b/files/fetch.py index fd631e6ebe6..04bebd0301c 100644 --- a/files/fetch.py +++ b/files/fetch.py @@ -45,10 +45,10 @@ options: flat: version_added: "1.2" description: - Allows you to override the default behavior of prepending hostname/path/to/file to - the destination. If dest ends with '/', it will use the basename of the source - file, similar to the copy module. Obviously this is only handy if the filenames - are unique. + - Allows you to override the default behavior of prepending + hostname/path/to/file to the destination. If dest ends with '/', it + will use the basename of the source file, similar to the copy module. + Obviously this is only handy if the filenames are unique. requirements: [] author: Michael DeHaan ''' diff --git a/files/file.py b/files/file.py index e154d6ad07f..8da87b0707e 100644 --- a/files/file.py +++ b/files/file.py @@ -57,7 +57,7 @@ options: or M(template) module if you want that behavior. If C(link), the symbolic link will be created or changed. Use C(hard) for hardlinks. If C(absent), directories will be recursively deleted, and files or symlinks will be unlinked. - If C(touch) (new in 1.4), an empty file will be created if the c(path) does not + If C(touch) (new in 1.4), an empty file will be created if the C(path) does not exist, while an existing file or directory will receive updated file access and modification times (similar to the way `touch` works from the command line). required: false @@ -88,6 +88,7 @@ options: ''' EXAMPLES = ''' +# change file ownership, group and mode. When specifying mode using octal numbers, first digit should always be 0.
- file: path=/etc/foo.conf owner=foo group=foo mode=0644 - file: src=/file/to/link/to dest=/path/to/symlink owner=foo group=foo state=link - file: src=/tmp/{{ item.path }} dest={{ item.dest }} state=link @@ -103,6 +104,45 @@ EXAMPLES = ''' ''' + +def get_state(path): + ''' Find out current state ''' + + if os.path.lexists(path): + if os.path.islink(path): + return 'link' + elif os.path.isdir(path): + return 'directory' + elif os.stat(path).st_nlink > 1: + return 'hard' + else: + # could be many other things, but defaulting to file + return 'file' + + return 'absent' + +def recursive_set_attributes(module, path, follow, file_args): + changed = False + for root, dirs, files in os.walk(path): + for fsobj in dirs + files: + fsname = os.path.join(root, fsobj) + if not os.path.islink(fsname): + tmp_file_args = file_args.copy() + tmp_file_args['path']=fsname + changed |= module.set_fs_attributes_if_different(tmp_file_args, changed) + else: + tmp_file_args = file_args.copy() + tmp_file_args['path']=fsname + changed |= module.set_fs_attributes_if_different(tmp_file_args, changed) + if follow: + fsname = os.path.join(root, os.readlink(fsname)) + if os.path.isdir(fsname): + changed |= recursive_set_attributes(module, fsname, follow, file_args) + tmp_file_args = file_args.copy() + tmp_file_args['path']=fsname + changed |= module.set_fs_attributes_if_different(tmp_file_args, changed) + return changed + def main(): module = AnsibleModule( @@ -143,18 +183,7 @@ def main(): pass module.exit_json(path=path, changed=False, appears_binary=appears_binary) - # Find out current state - prev_state = 'absent' - if os.path.lexists(path): - if os.path.islink(path): - prev_state = 'link' - elif os.path.isdir(path): - prev_state = 'directory' - elif os.stat(path).st_nlink > 1: - prev_state = 'hard' - else: - # could be many other things, but defaulting to file - prev_state = 'file' + prev_state = get_state(path) # state should default to file, but since that creates many conflicts, # default to 'current' when it exists. 
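[Editor's note] The refactor above pulls the state probe into get_state() and the os.walk() loop into recursive_set_attributes(), which now optionally follows symlinks. The sketch below isolates the traversal logic under assumed names (`apply_attrs` stands in for module.set_fs_attributes_if_different); it illustrates the shape of the new helper and is not the module source:

```python
import os

def walk_and_apply(path, follow, apply_attrs):
    # Apply an attribute-setting callback to every entry under path.
    changed = False
    for root, dirs, files in os.walk(path):
        for fsobj in dirs + files:
            fsname = os.path.join(root, fsobj)
            changed |= apply_attrs(fsname)
            if follow and os.path.islink(fsname):
                # Resolve the link target relative to its directory; recurse
                # into it when it is a directory, then set its attributes too.
                target = os.path.join(root, os.readlink(fsname))
                if os.path.isdir(target):
                    changed |= walk_and_apply(target, follow, apply_attrs)
                changed |= apply_attrs(target)
    return changed
```

Note that, as in the module code, nothing here guards against symlink cycles, so a link pointing back up the tree would recurse until Python's recursion limit is hit.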
@@ -172,7 +201,7 @@ def main(): if state in ['link','hard']: if follow and state == 'link': # use the current target of the link as the source - src = os.readlink(path) + src = os.path.realpath(path) else: module.fail_json(msg='src and dest are required for creating links') @@ -212,7 +241,15 @@ def main(): module.exit_json(path=path, changed=False) elif state == 'file': + if state != prev_state: + if follow and prev_state == 'link': + # follow symlink and operate on original + path = os.path.realpath(path) + prev_state = get_state(path) + file_args['path'] = path + + if prev_state not in ['file','hard']: # file is not absent and any other state is a conflict module.fail_json(path=path, msg='file (%s) is %s, cannot continue' % (path, prev_state)) @@ -220,6 +257,10 @@ def main(): module.exit_json(path=path, changed=changed) elif state == 'directory': + if follow and prev_state == 'link': + path = os.path.realpath(path) + prev_state = get_state(path) + if prev_state == 'absent': if module.check_mode: module.exit_json(changed=True) @@ -247,12 +288,7 @@ def main(): changed = module.set_fs_attributes_if_different(file_args, changed) if recurse: - for root,dirs,files in os.walk( file_args['path'] ): - for fsobj in dirs + files: - fsname=os.path.join(root, fsobj) - tmp_file_args = file_args.copy() - tmp_file_args['path']=fsname - changed = module.set_fs_attributes_if_different(tmp_file_args, changed) + changed |= recursive_set_attributes(module, file_args['path'], follow, file_args) module.exit_json(path=path, changed=changed) @@ -336,13 +372,13 @@ def main(): open(path, 'w').close() except OSError, e: module.fail_json(path=path, msg='Error, could not touch target: %s' % str(e)) - elif prev_state in ['file', 'directory']: + elif prev_state in ['file', 'directory', 'hard']: try: os.utime(path, None) except OSError, e: module.fail_json(path=path, msg='Error while touching existing target: %s' % str(e)) else: - module.fail_json(msg='Cannot touch other than files and directories') + module.fail_json(msg='Cannot touch other than files, directories, and hardlinks (%s is %s)' % (path, prev_state)) try: module.set_fs_attributes_if_different(file_args, True) except SystemExit, e: @@ -360,5 +396,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/files/ini_file.py b/files/ini_file.py index 756f2732a84..e247c265fc8 100644 --- a/files/ini_file.py +++ b/files/ini_file.py @@ -97,9 +97,9 @@ def do_ini(module, filename, section=None, option=None, value=None, state='prese changed = False if (sys.version_info[0] == 2 and sys.version_info[1] >= 7) or sys.version_info[0] >= 3: - cp = ConfigParser.ConfigParser(allow_no_value=True) + cp = ConfigParser.ConfigParser(allow_no_value=True) else: - cp = ConfigParser.ConfigParser() + cp = ConfigParser.ConfigParser() cp.optionxform = identity try: @@ -126,7 +126,7 @@ def do_ini(module, filename, section=None, option=None, value=None, state='prese if state == 'present': # DEFAULT section is always there by DEFAULT, so never try to add it. 
- if cp.has_section(section) == False and section.upper() != 'DEFAULT': + if not cp.has_section(section) and section.upper() != 'DEFAULT': cp.add_section(section) changed = True @@ -144,7 +144,7 @@ def do_ini(module, filename, section=None, option=None, value=None, state='prese cp.set(section, option, value) changed = True - if changed: + if changed and not module.check_mode: if backup: module.backup_local(filename) @@ -152,7 +152,7 @@ def do_ini(module, filename, section=None, option=None, value=None, state='prese f = open(filename, 'w') cp.write(f) except: - module.fail_json(msg="Can't creat %s" % filename) + module.fail_json(msg="Can't create %s" % filename) return changed @@ -183,7 +183,8 @@ def main(): backup = dict(default='no', type='bool'), state = dict(default='present', choices=['present', 'absent']) ), - add_file_common_args = True + add_file_common_args = True, + supports_check_mode = True ) info = dict() diff --git a/files/lineinfile.py b/files/lineinfile.py index b9fc628e10c..2f7154e17be 100644 --- a/files/lineinfile.py +++ b/files/lineinfile.py @@ -85,8 +85,9 @@ options: default: EOF description: - Used with C(state=present). If specified, the line will be inserted - after the specified regular expression. A special value is - available; C(EOF) for inserting the line at the end of the file. + after the last match of specified regular expression. A special value is + available; C(EOF) for inserting the line at the end of the file. + If the specified regular expression has no matches, EOF will be used instead. May not be used with C(backrefs). choices: [ 'EOF', '*regex*' ] insertbefore: required: false @@ -94,9 +95,10 @@ version_added: "1.1" description: - Used with C(state=present). If specified, the line will be inserted - before the specified regular expression. A value is available; - C(BOF) for inserting the line at the beginning of the file. - May not be used with C(backrefs). + before the last match of specified regular expression. A value is + available; C(BOF) for inserting the line at the beginning of the file. + If the specified regular expression has no matches, the line will be + inserted at the end of the file. May not be used with C(backrefs).
choices: [ 'BOF', '*regex*' ] create: required: false @@ -256,9 +258,9 @@ def present(module, dest, regexp, line, insertafter, insertbefore, create, msg = 'line added' changed = True # Add it to the end of the file if requested or - # if insertafter=/insertbefore didn't match anything + # if insertafter/insertbefore didn't match anything # (so default behaviour is to add at the end) - elif insertafter == 'EOF': + elif insertafter == 'EOF' or index[1] == -1: # If the file is not empty then ensure there's a newline before the added line if len(lines)>0 and not (lines[-1].endswith('\n') or lines[-1].endswith('\r')): @@ -267,9 +269,6 @@ def present(module, dest, regexp, line, insertafter, insertbefore, create, lines.append(line + os.linesep) msg = 'line added' changed = True - # Do nothing if insert* didn't match - elif index[1] == -1: - pass # insert* matched, but not the regexp else: lines.insert(index[1], line + os.linesep) diff --git a/files/replace.py b/files/replace.py index b7b75a9604d..588af02391e 100644 --- a/files/replace.py +++ b/files/replace.py @@ -152,6 +152,8 @@ def main(): if changed and not module.check_mode: if params['backup'] and os.path.exists(dest): module.backup_local(dest) + if params['follow'] and os.path.islink(dest): + dest = os.path.realpath(dest) write_changes(module, result[0], dest) msg, changed = check_file_attrs(module, changed, msg) diff --git a/files/stat.py b/files/stat.py index 484da2136d9..fbf2d4cb8f6 100644 --- a/files/stat.py +++ b/files/stat.py @@ -69,6 +69,189 @@ EXAMPLES = ''' - stat: path=/path/to/myhugefile get_md5=no ''' +RETURN = ''' +stat: + description: dictionary containing all the stat data + returned: success + type: dictionary + contains: + exists: + description: if the destination path actually exists or not + returned: success + type: boolean + sample: True + path: + description: The full path of the file/object to get the facts of + returned: success and if path exists + type: string + sample: '/path/to/file' + mode: + description: Unix permissions of the file in octal + returned: success, path exists and user can read stats + type: octal + sample: 1755 + isdir: + description: Tells you if the path is a directory + returned: success, path exists and user can read stats + type: boolean + sample: False + ischr: + description: Tells you if the path is a character device + returned: success, path exists and user can read stats + type: boolean + sample: False + isblk: + description: Tells you if the path is a block device + returned: success, path exists and user can read stats + type: boolean + sample: False + isreg: + description: Tells you if the path is a regular file + returned: success, path exists and user can read stats + type: boolean + sample: True + isfifo: + description: Tells you if the path is a named pipe + returned: success, path exists and user can read stats + type: boolean + sample: False + islnk: + description: Tells you if the path is a symbolic link + returned: success, path exists and user can read stats + type: boolean + sample: False + issock: + description: Tells you if the path is a unix domain socket + returned: success, path exists and user can read stats + type: boolean + sample: False + uid: + description: Numeric id representing the file owner + returned: success, path exists and user can read stats + type: int + sample: 1003 + gid: + description: Numeric id representing the group of the owner + returned: success, path exists and user can read stats + type: int + sample: 1003 + size: + description: Size in bytes for a plain file, amount of data for some special files + returned: success, path exists and user can read stats + type: int + sample: 203 + inode: + description: Inode number of the path + returned: success, path exists and user can read stats + type: int + sample: 12758 + dev: + description: Device the inode resides on + returned: success, path exists and user can read stats + type: int + sample: 33 + nlink: + description: Number of links to the inode (hard links) + returned: success, path exists and user can read stats + type: int + sample: 1 + atime: + description: Time of last access + returned: success, path exists and user can read stats + type: float + sample: 1424348972.575 + mtime: + description: Time of last modification + returned: success, path exists and user can read stats + type: float + sample: 1424348972.575 + ctime: + description: Time of last metadata update or creation (depends on OS) + returned: success, path exists and user can read stats + type: float + sample: 1424348972.575 + wusr: + description: Tells you if the owner has write permission + returned: success, path exists and user can read stats + type: boolean + sample: True + rusr: + description: Tells you if the owner has read permission + returned: success, path exists and user can read stats + type: boolean + sample: True + xusr: + description: Tells you if the owner has execute permission + returned: success, path exists and user can read stats + type: boolean + sample: True + wgrp: + description: Tells you if the owner's group has write permission + returned: success, path exists and user can read stats + type: boolean + sample: False + rgrp: + description: Tells you if the owner's group has read permission + returned: success, path exists and user can read stats + type: boolean + sample: True + xgrp: + description: Tells you if the owner's group has execute permission + returned: success, path exists and user can read stats + type: boolean + sample: True + woth: + description: Tells you if others have write permission + returned: success, path exists and user can read stats + type: boolean + sample: False + roth: + description: Tells you if others have read permission + returned: success, path exists and user can read stats + type: boolean + sample: True + xoth: + description: Tells you if others have execute permission + returned: success, path exists and user can read stats + type: boolean + sample: True + isuid: + description: Tells you if the invoking user's id matches the owner's id + returned: success, path exists and user can read stats + type: boolean + sample: False + isgid: + description: Tells you if the invoking user's group id matches the owner's group id + returned: success, path exists and user can read stats + type: boolean + sample: False + lnk_source: + description: Original path + returned: success, path exists and user can read stats and the path is a symbolic link + type: string + sample: "/path/to/file.txt" + md5: + description: md5 hash of the path + returned: success, path exists and user can read stats and path supports hashing and md5 is supported + type: string + sample: "2a5aeecc61dc98c4d780b14b330e3282" + checksum: + description: hash of the path + returned: success, path exists and user can read stats and path supports hashing + type: string + sample: "6e642bb8dd5c2e027bf21dd923337cbb4214f827" + pw_name: + description: User name of owner + returned: success, path exists and user can read stats and installed python supports it + type: string + sample: httpd + gr_name: + description: Group name of owner + returned: success, path exists and user can read stats and installed python supports it + type: string + sample: www-data +''' + import os import sys from stat import * diff --git a/files/synchronize.py b/files/synchronize.py index a2138b3410d..d29a900593b 100644 --- a/files/synchronize.py +++ b/files/synchronize.py @@ -255,7 +255,7 @@ def main(): group = module.params['group'] rsync_opts = module.params['rsync_opts'] - cmd = '%s --delay-updates -FF' % rsync + cmd = '%s --delay-updates -F' % rsync if compress: cmd = cmd + ' --compress' if rsync_timeout: diff --git a/files/unarchive.py b/files/unarchive.py index f46e52e02a3..7804d1bc02c 100644 --- a/files/unarchive.py +++ b/files/unarchive.py @@ -23,14 +23,14 @@ DOCUMENTATION = ''' --- module: unarchive version_added: 1.4 -short_description: Copies an archive to a remote location and unpack it +short_description: Unpacks an archive after (optionally) copying it from the local machine. extends_documentation_fragment: files description: - - The M(unarchive) module copies an archive file from the local machine to a remote and unpacks it. + - The M(unarchive) module unpacks an archive. By default, it will copy the source file from the local system to the target before unpacking - set copy=no to unpack an archive which already exists on the target. options: src: description: - - Local path to archive file to copy to the remote server; can be absolute or relative. + - If copy=yes (default), local path to archive file to copy to the target server; can be absolute or relative. If copy=no, path on the target server to existing archive file to unpack. required: true default: null dest: @@ -40,7 +40,7 @@ options: default: null copy: description: - - "if true, the file is copied from the 'master' to the target machine, otherwise, the plugin will look for src archive at the target machine." + - "If true, the file is copied from local 'master' to the target machine, otherwise, the plugin will look for src archive at the target machine."
required: false choices: [ "yes", "no" ] default: "yes" @@ -76,18 +76,35 @@ EXAMPLES = ''' ''' import os +from zipfile import ZipFile +class UnarchiveError(Exception): + pass # class to handle .zip files -class ZipFile(object): - +class ZipArchive(object): + def __init__(self, src, dest, module): self.src = src self.dest = dest self.module = module self.cmd_path = self.module.get_bin_path('unzip') + self._files_in_archive = [] + + @property + def files_in_archive(self, force_refresh=False): + if self._files_in_archive and not force_refresh: + return self._files_in_archive + + archive = ZipFile(self.src) + try: + self._files_in_archive = archive.namelist() + except: + raise UnarchiveError('Unable to list files in the archive') + + return self._files_in_archive - def is_unarchived(self): + def is_unarchived(self, mode, owner, group): return dict(unarchived=False) def unarchive(self): @@ -106,19 +123,61 @@ class ZipFile(object): # class to handle gzipped tar files -class TgzFile(object): - +class TgzArchive(object): + def __init__(self, src, dest, module): self.src = src self.dest = dest self.module = module - self.cmd_path = self.module.get_bin_path('tar') + # Prefer gtar (GNU tar) as it supports the compression options -zjJ + self.cmd_path = self.module.get_bin_path('gtar', None) + if not self.cmd_path: + # Fallback to tar + self.cmd_path = self.module.get_bin_path('tar') self.zipflag = 'z' + self._files_in_archive = [] + + @property + def files_in_archive(self, force_refresh=False): + if self._files_in_archive and not force_refresh: + return self._files_in_archive - def is_unarchived(self): - cmd = '%s -v -C "%s" --diff -%sf "%s"' % (self.cmd_path, self.dest, self.zipflag, self.src) + cmd = '%s -t%sf "%s"' % (self.cmd_path, self.zipflag, self.src) + rc, out, err = self.module.run_command(cmd) + if rc != 0: + raise UnarchiveError('Unable to list files in the archive') + + for filename in out.splitlines(): + if filename: + self._files_in_archive.append(filename) + return self._files_in_archive + + def is_unarchived(self, mode, owner, group): + cmd = '%s -C "%s" --diff -%sf "%s"' % (self.cmd_path, self.dest, self.zipflag, self.src) rc, out, err = self.module.run_command(cmd) unarchived = (rc == 0) + if not unarchived: + # Check whether the differences are in something that we're + # setting anyway + + # What will be set + to_be_set = set() + for perm in (('Mode', mode), ('Gid', group), ('Uid', owner)): + if perm[1] is not None: + to_be_set.add(perm[0]) + + # What is different + changes = set() + difference_re = re.compile(r': (.*) differs$') + for line in out.splitlines(): + match = difference_re.search(line) + if not match: + # Unknown tar output. 
Assume we have changes + return dict(unarchived=unarchived, rc=rc, out=out, err=err, cmd=cmd) + changes.add(match.groups()[0]) + + if changes and changes.issubset(to_be_set): + unarchived = True return dict(unarchived=unarchived, rc=rc, out=out, err=err, cmd=cmd) def unarchive(self): @@ -129,47 +188,41 @@ class TgzFile(object): def can_handle_archive(self): if not self.cmd_path: return False - cmd = '%s -t%sf "%s"' % (self.cmd_path, self.zipflag, self.src) - rc, out, err = self.module.run_command(cmd) - if rc == 0: - if len(out.splitlines(True)) > 0: + + try: + if self.files_in_archive: return True + except UnarchiveError: + pass + # Errors and no files in archive assume that we weren't able to + # properly unarchive it return False # class to handle tar files that aren't compressed -class TarFile(TgzFile): +class TarArchive(TgzArchive): def __init__(self, src, dest, module): - self.src = src - self.dest = dest - self.module = module - self.cmd_path = self.module.get_bin_path('tar') + super(TarArchive, self).__init__(src, dest, module) self.zipflag = '' # class to handle bzip2 compressed tar files -class TarBzip(TgzFile): +class TarBzipArchive(TgzArchive): def __init__(self, src, dest, module): - self.src = src - self.dest = dest - self.module = module - self.cmd_path = self.module.get_bin_path('tar') + super(TarBzipArchive, self).__init__(src, dest, module) self.zipflag = 'j' # class to handle xz compressed tar files -class TarXz(TgzFile): +class TarXzArchive(TgzArchive): def __init__(self, src, dest, module): - self.src = src - self.dest = dest - self.module = module - self.cmd_path = self.module.get_bin_path('tar') + super(TarXzArchive, self).__init__(src, dest, module) self.zipflag = 'J' # try handlers in order and return the one that works or bail if none work def pick_handler(src, dest, module): - handlers = [TgzFile, ZipFile, TarFile, TarBzip, TarXz] + handlers = [TgzArchive, ZipArchive, TarArchive, TarBzipArchive, TarXzArchive] for handler in handlers: obj = handler(src, dest, module) if obj.can_handle_archive(): @@ -193,6 +246,7 @@ def main(): src = os.path.expanduser(module.params['src']) dest = os.path.expanduser(module.params['dest']) copy = module.params['copy'] + file_args = module.load_file_common_arguments(module.params) # did tar file arrive? if not os.path.exists(src): @@ -214,23 +268,29 @@ def main(): res_args = dict(handler=handler.__class__.__name__, dest=dest, src=src) # do we need to do unpack? - res_args['check_results'] = handler.is_unarchived() + res_args['check_results'] = handler.is_unarchived(file_args['mode'], + file_args['owner'], file_args['group']) if res_args['check_results']['unarchived']: res_args['changed'] = False - module.exit_json(**res_args) - - # do the unpack - try: - res_args['extract_results'] = handler.unarchive() - if res_args['extract_results']['rc'] != 0: - module.fail_json(msg="failed to unpack %s to %s" % (src, dest), **res_args) - except IOError: - module.fail_json(msg="failed to unpack %s to %s" % (src, dest)) + else: + # do the unpack + try: + res_args['extract_results'] = handler.unarchive() + if res_args['extract_results']['rc'] != 0: + module.fail_json(msg="failed to unpack %s to %s" % (src, dest), **res_args) + except IOError: + module.fail_json(msg="failed to unpack %s to %s" % (src, dest)) + else: + res_args['changed'] = True - res_args['changed'] = True + # do we need to change perms? 
+ for filename in handler.files_in_archive: + file_args['path'] = os.path.join(dest, filename) + res_args['changed'] = module.set_fs_attributes_if_different(file_args, res_args['changed']) module.exit_json(**res_args) # import module snippets from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/inventory/add_host.py b/inventory/add_host.py index 4fd4e1eb15f..c7e066b74ee 100644 --- a/inventory/add_host.py +++ b/inventory/add_host.py @@ -5,7 +5,7 @@ DOCUMENTATION = ''' module: add_host short_description: add a host (and alternatively a group) to the ansible-playbook in-memory inventory description: - - Use variables to create new hosts and groups in inventory for use in later plays of the same playbook. + - Use variables to create new hosts and groups in inventory for use in later plays of the same playbook. Takes variables so you can define the new hosts more fully. version_added: "0.9" options: @@ -13,12 +13,15 @@ options: aliases: [ 'hostname', 'host' ] description: - The hostname/ip of the host to add to the inventory, can include a colon and a port number. - required: true + required: true groups: aliases: [ 'groupname', 'group' ] description: - The groups to add the hostname to, comma separated. required: false +notes: + - This module bypasses the play host loop and only runs once for all the hosts in the play, if you need it + to iterate use a with_ directive. author: Seth Vidal ''' diff --git a/network/basics/uri.py b/network/basics/uri.py index aac724a8f13..9be0a06cdce 100644 --- a/network/basics/uri.py +++ b/network/basics/uri.py @@ -381,7 +381,7 @@ def main(): # of uri executions. creates = os.path.expanduser(creates) if os.path.exists(creates): - module.exit_json(stdout="skipped, since %s exists" % creates, skipped=True, changed=False, stderr=False, rc=0) + module.exit_json(stdout="skipped, since %s exists" % creates, changed=False, stderr=False, rc=0) if removes is not None: # do not run the command if the line contains removes=filename @@ -389,7 +389,7 @@ def main(): # of uri executions. v = os.path.expanduser(removes) if not os.path.exists(removes): - module.exit_json(stdout="skipped, since %s does not exist" % removes, skipped=True, changed=False, stderr=False, rc=0) + module.exit_json(stdout="skipped, since %s does not exist" % removes, changed=False, stderr=False, rc=0) # httplib2 only sends authentication after the server asks for it with a 401. diff --git a/packaging/language/pip.py b/packaging/language/pip.py index 17f52c00398..d9ecc17af64 100644 --- a/packaging/language/pip.py +++ b/packaging/language/pip.py @@ -98,7 +98,7 @@ options: required: false default: null notes: - - Please note that virtualenv (U(http://www.virtualenv.org/)) must be installed on the remote host if the virtualenv parameter is specified. + - Please note that virtualenv (U(http://www.virtualenv.org/)) must be installed on the remote host if the virtualenv parameter is specified and the virtualenv needs to be initialized. requirements: [ "virtualenv", "pip" ] author: Matt Wright ''' @@ -113,6 +113,9 @@ EXAMPLES = ''' # Install (MyApp) using one of the remote protocols (bzr+,hg+,git+,svn+). You do not have to supply '-e' option in extra_args. 
- pip: name='svn+http://myrepo/svn/MyApp#egg=MyApp' +# Install (MyApp) from local tarball +- pip: name='file:///path/to/MyApp.tar.gz' + # Install (Bottle) into the specified (virtualenv), inheriting none of the globally installed modules - pip: name=bottle virtualenv=/my_app/venv @@ -252,12 +255,14 @@ def main(): if env: env = os.path.expanduser(env) - virtualenv = os.path.expanduser(virtualenv_command) - if os.path.basename(virtualenv) == virtualenv: - virtualenv = module.get_bin_path(virtualenv_command, True) if not os.path.exists(os.path.join(env, 'bin', 'activate')): if module.check_mode: module.exit_json(changed=True) + + virtualenv = os.path.expanduser(virtualenv_command) + if os.path.basename(virtualenv) == virtualenv: + virtualenv = module.get_bin_path(virtualenv_command, True) + if module.params['virtualenv_site_packages']: cmd = '%s --system-site-packages %s' % (virtualenv, env) else: @@ -278,7 +283,7 @@ def main(): pip = _get_pip(module, env, module.params['executable']) cmd = '%s %s' % (pip, state_map[state]) - + # If there's a virtualenv we want things we install to be able to use other # installations that exist as binaries within this virtualenv. Example: we # install cython and then gevent -- gevent needs to use the cython binary, @@ -308,7 +313,7 @@ def main(): cmd += ' %s' % _get_full_name(name, version) elif requirements: cmd += ' -r %s' % requirements - + this_dir = tempfile.gettempdir() if chdir: this_dir = os.path.join(this_dir, chdir) @@ -319,7 +324,7 @@ def main(): elif name.startswith('svn+') or name.startswith('git+') or \ name.startswith('hg+') or name.startswith('bzr+'): module.exit_json(changed=True) - + freeze_cmd = '%s freeze' % pip rc, out_pip, err_pip = module.run_command(freeze_cmd, cwd=this_dir) diff --git a/packaging/os/apt.py b/packaging/os/apt.py index 6e2f26f7237..8730e22e35d 100644 --- a/packaging/os/apt.py +++ b/packaging/os/apt.py @@ -34,10 +34,10 @@ options: default: null state: description: - - Indicates the desired package state. C(latest) ensures that the latest version is installed. + - Indicates the desired package state. C(latest) ensures that the latest version is installed. C(build-dep) ensures the package build dependencies are installed. required: false default: present - choices: [ "latest", "absent", "present" ] + choices: [ "latest", "absent", "present", "build-dep" ] update_cache: description: - Run the equivalent of C(apt-get update) before the operation. Can be run as part of the package installation or as a separate step. @@ -133,6 +133,9 @@ EXAMPLES = ''' # Install a .deb package - apt: deb=/tmp/mypackage.deb + +# Install the build dependencies for package "foo" +- apt: pkg=foo state=build-dep ''' @@ -144,6 +147,7 @@ warnings.filterwarnings('ignore', "apt API not stable yet", FutureWarning) import os import datetime import fnmatch +import itertools # APT related constants APT_ENV_VARS = dict( @@ -173,6 +177,24 @@ def package_split(pkgspec): else: return parts[0], None +def package_versions(pkgname, pkg, pkg_cache): + try: + versions = set(p.version for p in pkg.versions) + except AttributeError: + # assume older version of python-apt is installed + # apt.package.Package#versions require python-apt >= 0.7.9. 
+ pkg_cache_list = (p for p in pkg_cache.Packages if p.Name == pkgname) + pkg_versions = (p.VersionList for p in pkg_cache_list) + versions = set(p.VerStr for p in itertools.chain(*pkg_versions)) + + return versions + +def package_version_compare(version, other_version): + try: + return apt_pkg.version_compare(version, other_version) + except AttributeError: + return apt_pkg.VersionCompare(version, other_version) + def package_status(m, pkgname, version, cache, state): try: # get the package from the cache, as well as the @@ -183,9 +205,14 @@ def package_status(m, pkgname, version, cache, state): ll_pkg = cache._cache[pkgname] # the low-level package object except KeyError: if state == 'install': - if cache.get_providing_packages(pkgname): + try: + if cache.get_providing_packages(pkgname): + return False, True, False + m.fail_json(msg="No package matching '%s' is available" % pkgname) + except AttributeError: + # python-apt version too old to detect virtual packages + # mark as upgradable and let apt-get install deal with it return False, True, False - m.fail_json(msg="No package matching '%s' is available" % pkgname) else: return False, False, False try: @@ -206,7 +233,8 @@ def package_status(m, pkgname, version, cache, state): package_is_installed = pkg.isInstalled if version: - avail_upgrades = fnmatch.filter((p.version for p in pkg.versions), version) + versions = package_versions(pkgname, pkg, cache._cache) + avail_upgrades = fnmatch.filter(versions, version) if package_is_installed: try: @@ -220,7 +248,7 @@ def package_status(m, pkgname, version, cache, state): # Only claim the package is upgradable if a candidate matches the version package_is_upgradable = False for candidate in avail_upgrades: - if pkg.versions[candidate] > pkg.installed: + if package_version_compare(candidate, installed_version) > 0: package_is_upgradable = True break else: @@ -274,13 +302,18 @@ def expand_pkgspec_from_fnmatches(m, pkgspec, cache): def install(m, pkgspec, cache, upgrade=False, default_release=None, install_recommends=True, force=False, - dpkg_options=expand_dpkg_options(DPKG_OPTIONS)): + dpkg_options=expand_dpkg_options(DPKG_OPTIONS), + build_dep=False): pkg_list = [] packages = "" pkgspec = expand_pkgspec_from_fnmatches(m, pkgspec, cache) for package in pkgspec: name, version = package_split(package) installed, upgradable, has_files = package_status(m, name, version, cache, state='install') + if build_dep: + # Let apt decide what to install + pkg_list.append("'%s'" % package) + continue if not installed or (upgrade and upgradable): pkg_list.append("'%s'" % package) if installed and upgradable and version: @@ -307,7 +340,10 @@ def install(m, pkgspec, cache, upgrade=False, default_release=None, for (k,v) in APT_ENV_VARS.iteritems(): os.environ[k] = v - cmd = "%s -y %s %s %s install %s" % (APT_GET_CMD, dpkg_options, force_yes, check_arg, packages) + if build_dep: + cmd = "%s -y %s %s %s build-dep %s" % (APT_GET_CMD, dpkg_options, force_yes, check_arg, packages) + else: + cmd = "%s -y %s %s %s install %s" % (APT_GET_CMD, dpkg_options, force_yes, check_arg, packages) if default_release: cmd += " -t '%s'" % (default_release,) @@ -316,7 +352,7 @@ def install(m, pkgspec, cache, upgrade=False, default_release=None, rc, out, err = m.run_command(cmd) if rc: - return (False, dict(msg="'apt-get install %s' failed: %s" % (packages, err), stdout=out, stderr=err)) + return (False, dict(msg="'%s' failed: %s" % (cmd, err), stdout=out, stderr=err)) else: return (True, dict(changed=True, stdout=out, stderr=err)) 
else: @@ -327,7 +363,10 @@ def install_deb(m, debs, cache, force, install_recommends, dpkg_options): deps_to_install = [] pkgs_to_install = [] for deb_file in debs.split(','): - pkg = apt.debfile.DebPackage(deb_file) + try: + pkg = apt.debfile.DebPackage(deb_file) + except SystemError, e: + m.fail_json(msg="Error: %s\nSystem Error: %s" % (pkg._failure_string,str(e))) # Check if it's already installed if pkg.compare_to_version_in_cache() == pkg.VERSION_SAME: @@ -358,7 +397,7 @@ def install_deb(m, debs, cache, force, install_recommends, dpkg_options): if m.check_mode: options += " --simulate" if force: - options += " --force-yes" + options += " --force-all" cmd = "dpkg %s -i %s" % (options, " ".join(pkgs_to_install)) rc, out, err = m.run_command(cmd) @@ -462,7 +501,7 @@ def upgrade(m, mode="yes", force=False, default_release=None, def main(): module = AnsibleModule( argument_spec = dict( - state = dict(default='present', choices=['installed', 'latest', 'removed', 'absent', 'present']), + state = dict(default='present', choices=['installed', 'latest', 'removed', 'absent', 'present', 'build-dep']), update_cache = dict(default=False, aliases=['update-cache'], type='bool'), cache_valid_time = dict(type='int'), purge = dict(default=False, type='bool'), @@ -570,20 +609,18 @@ def main(): if latest and '=' in package: module.fail_json(msg='version number inconsistent with state=latest: %s' % package) - if p['state'] == 'latest': - result = install(module, packages, cache, upgrade=True, + if p['state'] in ('latest', 'present', 'build-dep'): + state_upgrade = False + state_builddep = False + if p['state'] == 'latest': + state_upgrade = True + if p['state'] == 'build-dep': + state_builddep = True + result = install(module, packages, cache, upgrade=state_upgrade, default_release=p['default_release'], install_recommends=install_recommends, - force=force_yes, dpkg_options=dpkg_options) - (success, retvals) = result - if success: - module.exit_json(**retvals) - else: - module.fail_json(**retvals) - elif p['state'] == 'present': - result = install(module, packages, cache, default_release=p['default_release'], - install_recommends=install_recommends,force=force_yes, - dpkg_options=dpkg_options) + force=force_yes, dpkg_options=dpkg_options, + build_dep=state_builddep) (success, retvals) = result if success: module.exit_json(**retvals) @@ -594,6 +631,8 @@ def main(): except apt.cache.LockFailedException: module.fail_json(msg="Failed to lock apt for exclusive operation") + except apt.cache.FetchFailedException: + module.fail_json(msg="Could not fetch updated apt files") # import module snippets from ansible.module_utils.basic import * diff --git a/packaging/os/rhn_channel.py b/packaging/os/rhn_channel.py index 05a155f7ca1..42d61f36e66 100644 --- a/packaging/os/rhn_channel.py +++ b/packaging/os/rhn_channel.py @@ -90,17 +90,17 @@ def get_systemid(client, session, sysname): # ------------------------------------------------------- # -def subscribe_channels(channels, client, session, sysname, sys_id): - c = base_channels(client, session, sys_id) - c.append(channels) - return client.channel.software.setSystemChannels(session, sys_id, c) +def subscribe_channels(channelname, client, session, sysname, sys_id): + channels = base_channels(client, session, sys_id) + channels.append(channelname) + return client.system.setChildChannels(session, sys_id, channels) # ------------------------------------------------------- # -def unsubscribe_channels(channels, client, session, sysname, sys_id): - c = base_channels(client, 
session, sys_id) - c.remove(channels) - return client.channel.software.setSystemChannels(session, sys_id, c) +def unsubscribe_channels(channelname, client, session, sysname, sys_id): + channels = base_channels(client, session, sys_id) + channels.remove(channelname) + return client.system.setChildChannels(session, sys_id, channels) # ------------------------------------------------------- # @@ -167,3 +167,4 @@ def main(): # import module snippets from ansible.module_utils.basic import * main() + diff --git a/packaging/os/yum.py b/packaging/os/yum.py index 73fbb699e75..671dbf7fde3 100644 --- a/packaging/os/yum.py +++ b/packaging/os/yum.py @@ -25,6 +25,7 @@ import traceback import os import yum +import rpm try: from yum.misc import find_unfinished_transactions, find_ts_remaining @@ -108,7 +109,7 @@ options: notes: [] # informational: requirements for nodes -requirements: [ yum, rpm ] +requirements: [ yum ] author: Seth Vidal ''' @@ -122,6 +123,9 @@ EXAMPLES = ''' - name: install the latest version of Apache from the testing repo yum: name=httpd enablerepo=testing state=present +- name: install one specific version of Apache + yum: name=httpd-2.2.29-1.4.amzn1 state=present + - name: upgrade all packages yum: name=* state=latest @@ -149,21 +153,13 @@ def log(msg): syslog.openlog('ansible-yum', 0, syslog.LOG_USER) syslog.syslog(syslog.LOG_NOTICE, msg) -def yum_base(conf_file=None, cachedir=False): +def yum_base(conf_file=None): my = yum.YumBase() my.preconf.debuglevel=0 my.preconf.errorlevel=0 if conf_file and os.path.exists(conf_file): my.preconf.fn = conf_file - if cachedir or os.geteuid() != 0: - if hasattr(my, 'setCacheDir'): - my.setCacheDir() - else: - cachedir = yum.misc.getCacheDir() - my.repos.setCacheDir(cachedir) - my.conf.cache = 0 - return my def install_yum_utils(module): @@ -247,13 +243,11 @@ def is_available(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=[], dis_ else: myrepoq = list(repoq) - for repoid in dis_repos: - r_cmd = ['--disablerepo', repoid] - myrepoq.extend(r_cmd) + r_cmd = ['--disablerepo', ','.join(dis_repos)] + myrepoq.extend(r_cmd) - for repoid in en_repos: - r_cmd = ['--enablerepo', repoid] - myrepoq.extend(r_cmd) + r_cmd = ['--enablerepo', ','.join(en_repos)] + myrepoq.extend(r_cmd) cmd = myrepoq + ["--qf", qf, pkgspec] rc,out,err = module.run_command(cmd) @@ -296,13 +290,11 @@ def is_update(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=[], dis_rep else: myrepoq = list(repoq) - for repoid in dis_repos: - r_cmd = ['--disablerepo', repoid] - myrepoq.extend(r_cmd) + r_cmd = ['--disablerepo', ','.join(dis_repos)] + myrepoq.extend(r_cmd) - for repoid in en_repos: - r_cmd = ['--enablerepo', repoid] - myrepoq.extend(r_cmd) + r_cmd = ['--enablerepo', ','.join(en_repos)] + myrepoq.extend(r_cmd) cmd = myrepoq + ["--pkgnarrow=updates", "--qf", qf, pkgspec] rc,out,err = module.run_command(cmd) @@ -341,13 +333,11 @@ def what_provides(module, repoq, req_spec, conf_file, qf=def_qf, en_repos=[], d else: myrepoq = list(repoq) - for repoid in dis_repos: - r_cmd = ['--disablerepo', repoid] - myrepoq.extend(r_cmd) + r_cmd = ['--disablerepo', ','.join(dis_repos)] + myrepoq.extend(r_cmd) - for repoid in en_repos: - r_cmd = ['--enablerepo', repoid] - myrepoq.extend(r_cmd) + r_cmd = ['--enablerepo', ','.join(en_repos)] + myrepoq.extend(r_cmd) cmd = myrepoq + ["--qf", qf, "--whatprovides", req_spec] rc,out,err = module.run_command(cmd) @@ -405,14 +395,19 @@ def transaction_exists(pkglist): def local_nvra(module, path): """return nvra of a local rpm passed in""" - - cmd 
= ['/bin/rpm', '-qp' ,'--qf', - '%{name}-%{version}-%{release}.%{arch}\n', path ] - rc, out, err = module.run_command(cmd) - if rc != 0: - return None - nvra = out.split('\n')[0] - return nvra + + ts = rpm.TransactionSet() + ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES) + fd = os.open(path, os.O_RDONLY) + try: + header = ts.hdrFromFdno(fd) + finally: + os.close(fd) + + return '%s-%s-%s.%s' % (header[rpm.RPMTAG_NAME], + header[rpm.RPMTAG_VERSION], + header[rpm.RPMTAG_RELEASE], + header[rpm.RPMTAG_ARCH]) def pkg_to_dict(pkgstr): @@ -682,7 +677,7 @@ def latest(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos): nothing_to_do = False break - if basecmd == 'update' and is_update(module, repoq, this, conf_file, en_repos=en_repos, dis_repos=dis_repos): + if basecmd == 'update' and is_update(module, repoq, this, conf_file, en_repos=en_repos, dis_repos=dis_repos): nothing_to_do = False break @@ -744,16 +739,14 @@ def ensure(module, state, pkgspec, conf_file, enablerepo, disablerepo, en_repos = [] if disablerepo: dis_repos = disablerepo.split(',') + r_cmd = ['--disablerepo=%s' % disablerepo] + yum_basecmd.extend(r_cmd) if enablerepo: en_repos = enablerepo.split(',') - - for repoid in dis_repos: - r_cmd = ['--disablerepo=%s' % repoid] + r_cmd = ['--enablerepo=%s' % enablerepo] yum_basecmd.extend(r_cmd) + - for repoid in en_repos: - r_cmd = ['--enablerepo=%s' % repoid] - yum_basecmd.extend(r_cmd) if state in ['installed', 'present', 'latest']: my = yum_base(conf_file) try: - for r in dis_repos: - my.repos.disableRepo(r) - + if disablerepo: + my.repos.disableRepo(disablerepo) current_repos = my.repos.repos.keys() - for r in en_repos: + if enablerepo: try: - my.repos.enableRepo(r) + my.repos.enableRepo(enablerepo) new_repos = my.repos.repos.keys() for i in new_repos: if not i in current_repos: @@ -779,7 +771,6 @@ def ensure(module, state, pkgspec, conf_file, enablerepo, disablerepo, module.fail_json(msg="Error setting/accessing repo %s: %s" % (r, e)) except yum.Errors.YumBaseError, e: module.fail_json(msg="Error accessing repos: %s" % e) - if state in ['installed', 'present']: if disable_gpg_check: yum_basecmd.append('--nogpgcheck') diff --git a/source_control/git.py b/source_control/git.py index ee8b9a6d29a..0cb87304a92 100644 --- a/source_control/git.py +++ b/source_control/git.py @@ -80,15 +80,27 @@ options: default: "origin" description: - Name of the remote. + refspec: + required: false + default: null + version_added: "1.9" + description: + - Add an additional refspec to be fetched. + If version is set to a I(SHA-1) not reachable from any branch + or tag, this option may be necessary to specify the ref containing + the I(SHA-1). + Uses the same syntax as the 'git fetch' command. + An example value could be "refs/meta/config". force: required: false - default: "yes" + default: "no" choices: [ "yes", "no" ] version_added: "0.7" description: - If C(yes), any modified files in the working repository will be discarded. Prior to 0.7, this was always - 'yes' and could not be disabled. + 'yes' and could not be disabled. Prior to 1.9, the default was + `yes` depth: required: false default: null @@ -170,6 +182,9 @@ EXAMPLES = ''' # Example just get information about the repository whether or not it has # already been cloned locally.
- git: repo=git://foosball.example.org/path/to/repo.git dest=/srv/checkout clone=no update=no + +# Example checkout a github repo and use refspec to fetch all pull requests +- git: repo=https://github.com/ansible/ansible-examples.git dest=/src/ansible-examples refspec=+refs/pull/*:refs/heads/* ''' import re @@ -283,7 +298,7 @@ def get_submodule_versions(git_path, module, dest, version='HEAD'): return submodules def clone(git_path, module, repo, dest, remote, depth, version, bare, - reference): + reference, refspec): ''' makes a new git repo if it does not already exist ''' dest_dirname = os.path.dirname(dest) try: @@ -308,6 +323,9 @@ def clone(git_path, module, repo, dest, remote, depth, version, bare, if remote != 'origin': module.run_command([git_path, 'remote', 'add', remote, repo], check_rc=True, cwd=dest) + if refspec: + module.run_command([git_path, 'fetch', remote, refspec], check_rc=True, cwd=dest) + def has_local_mods(module, git_path, dest, bare): if bare: return False @@ -455,35 +473,31 @@ def get_head_branch(git_path, module, dest, remote, bare=False): f.close() return branch -def fetch(git_path, module, repo, dest, version, remote, bare): +def fetch(git_path, module, repo, dest, version, remote, bare, refspec): ''' updates repo from remote sources ''' - out_acc = [] - err_acc = [] - (rc, out0, err0) = module.run_command([git_path, 'remote', 'set-url', remote, repo], cwd=dest) - if rc != 0: - module.fail_json(msg="Failed to set a new url %s for %s: %s" % (repo, remote, out0 + err0)) - if bare: - (rc, out1, err1) = module.run_command([git_path, 'fetch', remote, '+refs/heads/*:refs/heads/*'], cwd=dest) - else: - (rc, out1, err1) = module.run_command("%s fetch %s" % (git_path, remote), cwd=dest) - out_acc.append(out1) - err_acc.append(err1) - if rc != 0: - module.fail_json(msg="Failed to download remote objects and refs: %s %s" % - (''.join(out_acc), ''.join(err_acc))) + commands = [("set a new url %s for %s" % (repo, remote), [git_path, 'remote', 'set-url', remote, repo])] + + fetch_str = 'download remote objects and refs' if bare: - (rc, out2, err2) = module.run_command([git_path, 'fetch', remote, '+refs/tags/*:refs/tags/*'], cwd=dest) + refspecs = ['+refs/heads/*:refs/heads/*', '+refs/tags/*:refs/tags/*'] + if refspec: + refspecs.append(refspec) + commands.append((fetch_str, [git_path, 'fetch', remote] + refspecs)) else: - (rc, out2, err2) = module.run_command("%s fetch --tags %s" % (git_path, remote), cwd=dest) - out_acc.append(out2) - err_acc.append(err2) - if rc != 0: - module.fail_json(msg="Failed to download remote objects and refs: %s %s" % - (''.join(out_acc), ''.join(err_acc))) - - return (rc, ''.join(out_acc), ''.join(err_acc)) - + # unlike in bare mode, there's no way to combine the + # additional refspec with the default git fetch behavior, + # so use two commands + commands.append((fetch_str, [git_path, 'fetch', remote])) + refspecs = ['+refs/tags/*:refs/tags/*'] + if refspec: + refspecs.append(refspec) + commands.append((fetch_str, [git_path, 'fetch', remote] + refspecs)) + + for (label,command) in commands: + (rc,out,err) = module.run_command(command, cwd=dest) + if rc != 0: + module.fail_json(msg="Failed to %s: %s %s" % (label, out, err)) def submodules_fetch(git_path, module, remote, track_submodules, dest): changed = False @@ -596,8 +610,9 @@ def main(): repo=dict(required=True, aliases=['name']), version=dict(default='HEAD'), remote=dict(default='origin'), + refspec=dict(default=None), reference=dict(default=None), - force=dict(default='yes', type='bool'), + 
force=dict(default='no', type='bool'), depth=dict(default=None, type='int'), clone=dict(default='yes', type='bool'), update=dict(default='yes', type='bool'), @@ -616,6 +631,7 @@ def main(): repo = module.params['repo'] version = module.params['version'] remote = module.params['remote'] + refspec = module.params['refspec'] force = module.params['force'] depth = module.params['depth'] update = module.params['update'] @@ -673,7 +689,7 @@ def main(): remote_head = get_remote_head(git_path, module, dest, version, repo, bare) module.exit_json(changed=True, before=before, after=remote_head) # there's no git config, so clone - clone(git_path, module, repo, dest, remote, depth, version, bare, reference) + clone(git_path, module, repo, dest, remote, depth, version, bare, reference, refspec) repo_updated = True elif not update: # Just return having found a repo already in the dest path @@ -707,7 +723,7 @@ def main(): if repo_updated is None: if module.check_mode: module.exit_json(changed=True, before=before, after=remote_head) - fetch(git_path, module, repo, dest, version, remote, bare) + fetch(git_path, module, repo, dest, version, remote, bare, refspec) repo_updated = True # switch to version specified regardless of whether diff --git a/source_control/hg.py b/source_control/hg.py index 1b95bcd5ac3..d83215fabe1 100644 --- a/source_control/hg.py +++ b/source_control/hg.py @@ -2,6 +2,7 @@ #-*- coding: utf-8 -*- # (c) 2013, Yeukhon Wong +# (c) 2014, Nate Coraor # # This module was originally inspired by Brad Olson's ansible-module-mercurial # . This module tends @@ -49,13 +50,14 @@ options: - Equivalent C(-r) option in hg command which could be the changeset, revision number, branch name or even tag. required: false - default: "default" + default: null aliases: [ version ] force: description: - - Discards uncommitted changes. Runs C(hg update -C). + - Discards uncommitted changes. Runs C(hg update -C). Prior to + 1.9, the default was `yes`. required: false - default: "yes" + default: "no" choices: [ "yes", "no" ] purge: description: @@ -128,7 +130,10 @@ class Hg(object): if not before: return False - (rc, out, err) = self._command(['update', '-C', '-R', self.dest]) + args = ['update', '-C', '-R', self.dest] + if self.revision is not None: + args = args + ['-r', self.revision] + (rc, out, err) = self._command(args) if rc != 0: self.module.fail_json(msg=err) @@ -170,13 +175,30 @@ class Hg(object): ['pull', '-R', self.dest, self.repo]) def update(self): + if self.revision is not None: + return self._command(['update', '-r', self.revision, '-R', self.dest]) return self._command(['update', '-R', self.dest]) def clone(self): - return self._command(['clone', self.repo, self.dest, '-r', self.revision]) + if self.revision is not None: + return self._command(['clone', self.repo, self.dest, '-r', self.revision]) + return self._command(['clone', self.repo, self.dest]) - def switch_version(self): - return self._command(['update', '-r', self.revision, '-R', self.dest]) + @property + def at_revision(self): + """ + There is no point in pulling from a potentially down/slow remote site + if the desired changeset is already the current changeset. 
+ """ + if self.revision is None or len(self.revision) < 7: + # Assume it's a rev number, tag, or branch + return False + (rc, out, err) = self._command(['--debug', 'id', '-i', '-R', self.dest]) + if rc != 0: + self.module.fail_json(msg=err) + if out.startswith(self.revision): + return True + return False # =========================================== @@ -185,8 +207,8 @@ def main(): argument_spec = dict( repo = dict(required=True, aliases=['name']), dest = dict(required=True), - revision = dict(default="default", aliases=['version']), - force = dict(default='yes', type='bool'), + revision = dict(default=None, aliases=['version']), + force = dict(default='no', type='bool'), purge = dict(default='no', type='bool'), executable = dict(default=None), ), @@ -212,6 +234,12 @@ def main(): (rc, out, err) = hg.clone() if rc != 0: module.fail_json(msg=err) + elif hg.at_revision: + # no update needed, don't pull + before = hg.get_revision() + + # but force and purge if desired + cleaned = hg.cleanup(force, purge) else: # get the current state before doing pulling before = hg.get_revision() @@ -227,7 +255,6 @@ def main(): if rc != 0: module.fail_json(msg=err) - hg.switch_version() after = hg.get_revision() if before != after or cleaned: changed = True diff --git a/source_control/subversion.py b/source_control/subversion.py index 6709a8c3939..f4a0f65fd78 100644 --- a/source_control/subversion.py +++ b/source_control/subversion.py @@ -50,8 +50,9 @@ options: force: description: - If C(yes), modified files will be discarded. If C(no), module will fail if it encounters modified files. + Prior to 1.9 the default was `yes`. required: false - default: "yes" + default: "no" choices: [ "yes", "no" ] username: description: @@ -123,7 +124,12 @@ class Subversion(object): def export(self, force=False): '''Export svn repo to directory''' - self._exec(["export", "-r", self.revision, self.repo, self.dest]) + cmd = ["export"] + if force: + cmd.append("--force") + cmd.extend(["-r", self.revision, self.repo, self.dest]) + + self._exec(cmd) def switch(self): '''Change working directory's repo.''' @@ -173,7 +179,7 @@ def main(): dest=dict(required=True), repo=dict(required=True, aliases=['name', 'repository']), revision=dict(default='HEAD', aliases=['rev', 'version']), - force=dict(default='yes', type='bool'), + force=dict(default='no', type='bool'), username=dict(required=False), password=dict(required=False), executable=dict(default=None), @@ -194,7 +200,7 @@ def main(): os.environ['LANG'] = 'C' svn = Subversion(module, dest, repo, revision, username, password, svn_path) - if not os.path.exists(dest): + if export or not os.path.exists(dest): before = None local_mods = False if module.check_mode: @@ -202,7 +208,7 @@ def main(): if not export: svn.checkout() else: - svn.export() + svn.export(force=force) elif os.path.exists("%s/.svn" % (dest, )): # Order matters. Need to get local mods before switch to avoid false # positives. Need to switch before revert to ensure we are reverting to @@ -222,9 +228,12 @@ def main(): else: module.fail_json(msg="ERROR: %s folder already exists, but its not a subversion repository." 
% (dest, )) - after = svn.get_revision() - changed = before != after or local_mods - module.exit_json(changed=changed, before=before, after=after) + if export: + module.exit_json(changed=True) + else: + after = svn.get_revision() + changed = before != after or local_mods + module.exit_json(changed=changed, before=before, after=after) # import module snippets from ansible.module_utils.basic import * diff --git a/system/authorized_key.py b/system/authorized_key.py index d5792200b8d..be2a442346d 100644 --- a/system/authorized_key.py +++ b/system/authorized_key.py @@ -37,7 +37,7 @@ options: aliases: [] key: description: - - The SSH public key, as a string + - The SSH public key(s), as a string or (since 1.9) url (https://github.com/username.keys) required: true default: null path: @@ -70,6 +70,15 @@ options: required: false default: null version_added: "1.4" + exclusive: + description: + - Whether to remove all other non-specified keys from the + authorized_keys file. Multiple keys can be specified in a single + key= string value by separating them by newlines. + required: false + choices: [ "yes", "no" ] + default: "no" + version_added: "1.9" description: - "Adds or removes authorized keys for particular user accounts" author: Brad Olson @@ -79,6 +88,9 @@ EXAMPLES = ''' # Example using key data from a local file on the management machine - authorized_key: user=charlie key="{{ lookup('file', '/home/charlie/.ssh/id_rsa.pub') }}" +# Using github url as key source +- authorized_key: user=charlie key=https://github.com/charlie.keys + # Using alternate directory locations: - authorized_key: user=charlie key="{{ lookup('file', '/home/charlie/.ssh/id_rsa.pub') }}" @@ -97,6 +109,10 @@ EXAMPLES = ''' - authorized_key: user=charlie key="{{ lookup('file', '/home/charlie/.ssh/id_rsa.pub') }}" key_options='no-port-forwarding,host="10.0.1.1"' + +# Set up authorized_keys exclusively with one key +- authorized_key: user=root key=public_keys/doe-jane state=present + exclusive=yes ''' # Makes sure the public key line is present or absent in the user's .ssh/authorized_keys. @@ -332,16 +348,32 @@ def enforce_state(module, params): manage_dir = params.get("manage_dir", True) state = params.get("state", "present") key_options = params.get("key_options", None) + exclusive = params.get("exclusive", False) + error_msg = "Error getting key from: %s" + + # if the key is a url, request it and use it as key source + if key.startswith("http"): + try: + resp, info = fetch_url(module, key) + if info['status'] != 200: + module.fail_json(msg=error_msg % key) + else: + key = resp.read() + except Exception: + module.fail_json(msg=error_msg % key) # extract individual keys into an array, skipping blank lines and comments key = [s for s in key.splitlines() if s and not s.startswith('#')] - # check current state -- just get the filename, don't create file do_write = False params["keyfile"] = keyfile(module, user, do_write, path, manage_dir) existing_keys = readkeys(module, params["keyfile"]) + # Add a place holder for keys that should exist in the state=present and + # exclusive=true case + keys_to_exist = [] + # Check our new keys, if any of them exist we'll continue. 
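(An aside on the exclusive=yes behaviour this patch introduces: once the desired keys are recorded in `keys_to_exist`, pruning is a plain set difference over the parsed key signatures, as the "remove all other keys" block below shows. A minimal sketch with a hypothetical helper name, assuming `existing_keys` is a dict keyed by key signature as in the module:)

    def prune_unlisted_keys(existing_keys, keys_to_exist):
        # Drop every authorized key the task did not name and report
        # whether the file needs to be written back.
        to_remove = frozenset(existing_keys).difference(keys_to_exist)
        for key in to_remove:
            del existing_keys[key]
        return bool(to_remove)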
     for new_key in key:
         parsed_new_key = parsekey(module, new_key)
@@ -371,6 +403,7 @@

         # handle idempotent state=present
         if state=="present":
+            keys_to_exist.append(parsed_new_key[0])
             if len(non_matching_keys) > 0:
                 for non_matching_key in non_matching_keys:
                     if non_matching_key[0] in existing_keys:
@@ -387,6 +420,13 @@
                 del existing_keys[parsed_new_key[0]]
                 do_write = True

+    # remove all other keys to honor exclusive
+    if state == "present" and exclusive:
+        to_remove = frozenset(existing_keys).difference(keys_to_exist)
+        for key in to_remove:
+            del existing_keys[key]
+            do_write = True
+
     if do_write:
         if module.check_mode:
             module.exit_json(changed=True)
@@ -409,6 +449,7 @@ def main():
        state         = dict(default='present', choices=['absent','present']),
        key_options   = dict(required=False, type='str'),
        unique        = dict(default=False, type='bool'),
+       exclusive     = dict(default=False, type='bool'),
         ),
         supports_check_mode=True
     )
@@ -418,4 +459,5 @@ def main():

 # import module snippets
 from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
 main()
diff --git a/system/group.py b/system/group.py
old mode 100644
new mode 100755
index 617de7c2857..83ea410b0b1
--- a/system/group.py
+++ b/system/group.py
@@ -251,6 +251,49 @@ class FreeBsdGroup(Group):

 # ===========================================
+
+
+class DarwinGroup(Group):
+    """
+    This is a Mac OS X Darwin Group manipulation class.
+
+    This overrides the following methods from the generic class:-
+      - group_del()
+      - group_add()
+      - group_mod()
+
+    Group manipulation is done using dseditgroup(1).
+    """
+
+    platform = 'Darwin'
+    distribution = None
+
+    def group_add(self, **kwargs):
+        cmd = [self.module.get_bin_path('dseditgroup', True)]
+        cmd += [ '-o', 'create' ]
+        cmd += [ '-i', self.gid ]
+        cmd += [ '-L', self.name ]
+        (rc, out, err) = self.execute_command(cmd)
+        return (rc, out, err)
+
+    def group_del(self):
+        cmd = [self.module.get_bin_path('dseditgroup', True)]
+        cmd += [ '-o', 'delete' ]
+        cmd += [ '-L', self.name ]
+        (rc, out, err) = self.execute_command(cmd)
+        return (rc, out, err)
+
+    def group_mod(self):
+        info = self.group_info()
+        if self.gid is not None and int(self.gid) != info[2]:
+            cmd = [self.module.get_bin_path('dseditgroup', True)]
+            cmd += [ '-o', 'edit' ]
+            cmd += [ '-i', self.gid ]
+            cmd += [ '-L', self.name ]
+            (rc, out, err) = self.execute_command(cmd)
+            return (rc, out, err)
+        return (None, '', '')
+
 class OpenBsdGroup(Group):
     """
     This is a OpenBSD Group manipulation class.
diff --git a/system/hostname.py b/system/hostname.py
index cd5738b43d5..3ec243af1f6 100644
--- a/system/hostname.py
+++ b/system/hostname.py
@@ -28,6 +28,7 @@ requirements: [ hostname ]
 description:
     - Set system's hostname
     - Currently implemented on Debian, Ubuntu, Fedora, RedHat, openSUSE, Linaro, ScientificLinux, Arch, CentOS, AMI.
+    - Any distribution that uses systemd as its init system
 options:
     name:
         required: true
@@ -232,9 +233,9 @@ class RedHatStrategy(GenericStrategy):

 # ===========================================

-class FedoraStrategy(GenericStrategy):
+class SystemdStrategy(GenericStrategy):
     """
-    This is a Fedora family Hostname manipulation strategy class - it uses
+    This is a Systemd hostname manipulation strategy class - it uses
     the hostnamectl command.
""" @@ -323,17 +324,17 @@ class OpenRCStrategy(GenericStrategy): class FedoraHostname(Hostname): platform = 'Linux' distribution = 'Fedora' - strategy_class = FedoraStrategy + strategy_class = SystemdStrategy class OpenSUSEHostname(Hostname): platform = 'Linux' distribution = 'Opensuse ' - strategy_class = FedoraStrategy + strategy_class = SystemdStrategy class ArchHostname(Hostname): platform = 'Linux' distribution = 'Arch' - strategy_class = FedoraStrategy + strategy_class = SystemdStrategy class RedHat5Hostname(Hostname): platform = 'Linux' @@ -345,7 +346,7 @@ class RedHatServerHostname(Hostname): distribution = 'Red hat enterprise linux server' distribution_version = get_distribution_version() if distribution_version and LooseVersion(distribution_version) >= LooseVersion("7"): - strategy_class = FedoraStrategy + strategy_class = SystemdStrategy else: strategy_class = RedHatStrategy @@ -354,7 +355,7 @@ class RedHatWorkstationHostname(Hostname): distribution = 'Red hat enterprise linux workstation' distribution_version = get_distribution_version() if distribution_version and LooseVersion(distribution_version) >= LooseVersion("7"): - strategy_class = FedoraStrategy + strategy_class = SystemdStrategy else: strategy_class = RedHatStrategy @@ -363,7 +364,7 @@ class CentOSHostname(Hostname): distribution = 'Centos' distribution_version = get_distribution_version() if distribution_version and LooseVersion(distribution_version) >= LooseVersion("7"): - strategy_class = FedoraStrategy + strategy_class = SystemdStrategy else: strategy_class = RedHatStrategy @@ -372,19 +373,27 @@ class CentOSLinuxHostname(Hostname): distribution = 'Centos linux' distribution_version = get_distribution_version() if distribution_version and LooseVersion(distribution_version) >= LooseVersion("7"): - strategy_class = FedoraStrategy + strategy_class = SystemdStrategy else: strategy_class = RedHatStrategy class ScientificHostname(Hostname): platform = 'Linux' distribution = 'Scientific' - strategy_class = RedHatStrategy + distribution_version = get_distribution_version() + if distribution_version and LooseVersion(distribution_version) >= LooseVersion("7"): + strategy_class = SystemdStrategy + else: + strategy_class = RedHatStrategy class ScientificLinuxHostname(Hostname): platform = 'Linux' distribution = 'Scientific linux' - strategy_class = RedHatStrategy + distribution_version = get_distribution_version() + if distribution_version and LooseVersion(distribution_version) >= LooseVersion("7"): + strategy_class = SystemdStrategy + else: + strategy_class = RedHatStrategy class AmazonLinuxHostname(Hostname): platform = 'Linux' @@ -401,6 +410,11 @@ class UbuntuHostname(Hostname): distribution = 'Ubuntu' strategy_class = DebianStrategy +class LinuxmintHostname(Hostname): + platform = 'Linux' + distribution = 'Linuxmint' + strategy_class = DebianStrategy + class LinaroHostname(Hostname): platform = 'Linux' distribution = 'Linaro' diff --git a/system/mount.py b/system/mount.py index 9dc6fbe7b8c..d415d65b0d5 100644 --- a/system/mount.py +++ b/system/mount.py @@ -320,6 +320,17 @@ def main(): if os.path.ismount(name): if changed: res,msg = mount(module, **args) + elif 'bind' in args.get('opts', []): + changed = True + cmd = 'mount -l' + rc, out, err = module.run_command(cmd) + allmounts = out.split('\n') + for mounts in allmounts[:-1]: + arguments = mounts.split() + if arguments[0] == args['src'] and arguments[2] == args['name'] and arguments[4] == args['fstype']: + changed = False + if changed: + res,msg = mount(module, **args) 
else: changed = True res,msg = mount(module, **args) diff --git a/system/selinux.py b/system/selinux.py index 53e53d1d49c..908bbc250ec 100644 --- a/system/selinux.py +++ b/system/selinux.py @@ -174,14 +174,19 @@ def main(): if (state != runtime_state): if module.check_mode: module.exit_json(changed=True) - if (state == 'disabled'): - msgs.append('state change will take effect next reboot') - else: - if (runtime_enabled): + if (runtime_enabled): + if (state == 'disabled'): + if (runtime_state != 'permissive'): + # Temporarily set state to permissive + set_state('permissive') + msgs.append('runtime state temporarily changed from \'%s\' to \'permissive\', state change will take effect next reboot' % (runtime_state)) + else: + msgs.append('state change will take effect next reboot') + else: set_state(state) msgs.append('runtime state changed from \'%s\' to \'%s\'' % (runtime_state, state)) - else: - msgs.append('state change will take effect next reboot') + else: + msgs.append('state change will take effect next reboot') changed=True if (state != config_state): diff --git a/system/service.py b/system/service.py index 275bac900a9..9d921472349 100644 --- a/system/service.py +++ b/system/service.py @@ -26,7 +26,7 @@ version_added: "0.1" short_description: Manage services. description: - Controls services on remote hosts. Supported init systems include BSD init, - OpenRC, SysV, systemd, upstart. + OpenRC, SysV, Solaris SMF, systemd, upstart. options: name: required: true @@ -105,8 +105,14 @@ import shlex import select import time import string +import glob -from distutils.version import LooseVersion +# The distutils module is not shipped with SUNWPython on Solaris. +# It's in the SUNWPython-devel package which also contains development files +# that don't belong on production boxes. Since our Solaris code doesn't +# depend on LooseVersion, do not import it on Solaris. 
+if platform.system() != 'SunOS': + from distutils.version import LooseVersion class Service(object): """ @@ -387,7 +393,7 @@ class LinuxService(Service): def get_service_tools(self): paths = [ '/sbin', '/usr/sbin', '/bin', '/usr/bin' ] - binaries = [ 'service', 'chkconfig', 'update-rc.d', 'rc-service', 'rc-update', 'initctl', 'systemctl', 'start', 'stop', 'restart' ] + binaries = [ 'service', 'chkconfig', 'update-rc.d', 'rc-service', 'rc-update', 'initctl', 'systemctl', 'start', 'stop', 'restart', 'insserv' ] initpaths = [ '/etc/init.d' ] location = dict() @@ -455,6 +461,9 @@ class LinuxService(Service): if location.get('update-rc.d', False): # and uses update-rc.d self.enable_cmd = location['update-rc.d'] + elif location.get('insserv', None): + # and uses insserv + self.enable_cmd = location['insserv'] elif location.get('chkconfig', False): # and uses chkconfig self.enable_cmd = location['chkconfig'] @@ -473,6 +482,12 @@ class LinuxService(Service): if location.get('initctl', False): self.svc_initctl = location['initctl'] + def get_systemd_service_enabled(self): + (rc, out, err) = self.execute_command("%s is-enabled %s" % (self.enable_cmd, self.__systemd_unit,)) + if rc == 0: + return True + return False + def get_systemd_status_dict(self): (rc, out, err) = self.execute_command("%s show %s" % (self.enable_cmd, self.__systemd_unit,)) if rc != 0: @@ -687,12 +702,11 @@ class LinuxService(Service): action = 'disable' # Check if we're already in the correct state - d = self.get_systemd_status_dict() - if "UnitFileState" in d: - if self.enable and d["UnitFileState"] == "enabled": - self.changed = False - elif not self.enable and d["UnitFileState"] == "disabled": - self.changed = False + service_enabled = self.get_systemd_service_enabled() + if self.enable and service_enabled: + self.changed = False + elif not self.enable and not service_enabled: + self.changed = False elif not self.enable: self.changed = False @@ -734,45 +748,74 @@ class LinuxService(Service): # update-rc.d style # if self.enable_cmd.endswith("update-rc.d"): - if self.enable: - action = 'enable' - else: - action = 'disable' - if self.enable: - # make sure the init.d symlinks are created - # otherwise enable might not work - (rc, out, err) = self.execute_command("%s %s defaults" \ - % (self.enable_cmd, self.name)) + enabled = False + slinks = glob.glob('/etc/rc?.d/S??' + self.name) + if slinks: + enabled = True + + if self.enable != enabled: + self.changed = True + + if self.enable: + action = 'enable' + klinks = glob.glob('/etc/rc?.d/K??' 
+ self.name)
+                if not klinks:
+                    (rc, out, err) = self.execute_command("%s %s defaults" % (self.enable_cmd, self.name))
+                    if rc != 0:
+                        if err:
+                            self.module.fail_json(msg=err)
+                        else:
+                            self.module.fail_json(msg=out)
+            else:
+                action = 'disable'
+
+            (rc, out, err) = self.execute_command("%s %s %s" % (self.enable_cmd, self.name, action))
             if rc != 0:
                 if err:
                     self.module.fail_json(msg=err)
                 else:
                     self.module.fail_json(msg=out)
+        else:
+            self.changed = False
+
+        return
+
+        #
+        # insserv (Debian 7)
+        #
+        if self.enable_cmd.endswith("insserv"):
+            if self.enable:
+                (rc, out, err) = self.execute_command("%s -n %s" % (self.enable_cmd, self.name))
+            else:
+                (rc, out, err) = self.execute_command("%s -nr %s" % (self.enable_cmd, self.name))
-        (rc, out, err) = self.execute_command("%s -n %s %s" \
-                % (self.enable_cmd, self.name, action))

             self.changed = False
-        for line in out.splitlines():
-            if line.startswith('rename'):
-                self.changed = True
-                break
-            elif self.enable and 'do not exist' in line:
+            for line in err.splitlines():
+                if self.enable and line.find('enable service') != -1:
                     self.changed = True
                     break
-            elif not self.enable and 'already exist' in line:
+                if not self.enable and line.find('remove service') != -1:
                     self.changed = True
                     break

-        # Debian compatibility
-        for line in err.splitlines():
-            if self.enable and 'no runlevel symlinks to modify' in line:
-                self.changed = True
-                break
+            if self.module.check_mode:
+                self.module.exit_json(changed=self.changed)

             if not self.changed:
                 return

+            if self.enable:
+                (rc, out, err) = self.execute_command("%s %s" % (self.enable_cmd, self.name))
+                if (rc != 0) or (err != ''):
+                    self.module.fail_json(msg=("Failed to install service. rc: %s, out: %s, err: %s" % (rc, out, err)))
+                return (rc, out, err)
+            else:
+                (rc, out, err) = self.execute_command("%s -r %s" % (self.enable_cmd, self.name))
+                if (rc != 0) or (err != ''):
+                    self.module.fail_json(msg=("Failed to remove service. rc: %s, out: %s, err: %s" % (rc, out, err)))
+                return (rc, out, err)
+
         #
         # If we've gotten to the end, the service needs to be updated
         #
@@ -942,34 +985,151 @@ class FreeBsdService(Service):

 class OpenBsdService(Service):
     """
-    This is the OpenBSD Service manipulation class - it uses /etc/rc.d for
-    service control. Enabling a service is currently not supported because the
-    _flags variable is not boolean, you should supply a rc.conf.local
-    file in some other way.
+    This is the OpenBSD Service manipulation class - it uses rcctl(8) or
+    /etc/rc.d scripts for service control. Enabling a service is
+    only supported if rcctl is present.
""" platform = 'OpenBSD' distribution = None def get_service_tools(self): - rcdir = '/etc/rc.d' + self.enable_cmd = self.module.get_bin_path('rcctl') + + if self.enable_cmd: + self.svc_cmd = self.enable_cmd + else: + rcdir = '/etc/rc.d' - rc_script = "%s/%s" % (rcdir, self.name) - if os.path.isfile(rc_script): - self.svc_cmd = rc_script + rc_script = "%s/%s" % (rcdir, self.name) + if os.path.isfile(rc_script): + self.svc_cmd = rc_script if not self.svc_cmd: - self.module.fail_json(msg='unable to find rc.d script') + self.module.fail_json(msg='unable to find svc_cmd') def get_service_status(self): - rc, stdout, stderr = self.execute_command("%s %s" % (self.svc_cmd, 'check')) + if self.enable_cmd: + rc, stdout, stderr = self.execute_command("%s %s %s" % (self.svc_cmd, 'check', self.name)) + else: + rc, stdout, stderr = self.execute_command("%s %s" % (self.svc_cmd, 'check')) + + if stderr: + self.module.fail_json(msg=stderr) + if rc == 1: self.running = False elif rc == 0: self.running = True def service_control(self): - return self.execute_command("%s %s" % (self.svc_cmd, self.action)) + if self.enable_cmd: + return self.execute_command("%s -f %s %s" % (self.svc_cmd, self.action, self.name)) + else: + return self.execute_command("%s -f %s" % (self.svc_cmd, self.action)) + + def service_enable(self): + if not self.enable_cmd: + return super(OpenBsdService, self).service_enable() + + rc, stdout, stderr = self.execute_command("%s %s %s %s" % (self.enable_cmd, 'getdef', self.name, 'flags')) + + if stderr: + self.module.fail_json(msg=stderr) + + getdef_string = stdout.rstrip() + + # Depending on the service the string returned from 'default' may be + # either a set of flags or the boolean YES/NO + if getdef_string == "YES" or getdef_string == "NO": + default_flags = '' + else: + default_flags = getdef_string + + rc, stdout, stderr = self.execute_command("%s %s %s %s" % (self.enable_cmd, 'get', self.name, 'flags')) + + if stderr: + self.module.fail_json(msg=stderr) + + get_string = stdout.rstrip() + + # Depending on the service the string returned from 'getdef/get' may be + # either a set of flags or the boolean YES/NO + if get_string == "YES" or get_string == "NO": + current_flags = '' + else: + current_flags = get_string + + # If there are arguments from the user we use these as flags unless + # they are already set. + if self.arguments and self.arguments != current_flags: + changed_flags = self.arguments + # If the user has not supplied any arguments and the current flags + # differ from the default we reset them. + elif not self.arguments and current_flags != default_flags: + changed_flags = ' ' + # Otherwise there is no need to modify flags. 
+ else: + changed_flags = '' + + rc, stdout, stderr = self.execute_command("%s %s %s %s" % (self.enable_cmd, 'get', self.name, 'status')) + + if self.enable: + if rc == 0 and not changed_flags: + return + + if rc != 0: + status_action = "set %s status on" % (self.name) + else: + status_action = '' + if changed_flags: + flags_action = "set %s flags %s" % (self.name, changed_flags) + else: + flags_action = '' + else: + if rc == 1: + return + + status_action = "set %s status off" % self.name + flags_action = '' + + # Verify state assumption + if not status_action and not flags_action: + self.module.fail_json(msg="neither status_action or status_flags is set, this should never happen") + + if self.module.check_mode: + self.module.exit_json(changed=True, msg="changing service enablement") + + status_modified = 0 + if status_action: + rc, stdout, stderr = self.execute_command("%s %s" % (self.enable_cmd, status_action)) + + if rc != 0: + if stderr: + self.module.fail_json(msg=stderr) + else: + self.module.fail_json(msg="rcctl failed to modify service status") + + status_modified = 1 + + if flags_action: + rc, stdout, stderr = self.execute_command("%s %s" % (self.enable_cmd, flags_action)) + + if rc != 0: + if stderr: + if status_modified: + error_message = "rcctl modified service status but failed to set flags: " + stderr + else: + error_message = stderr + else: + if status_modified: + error_message = "rcctl modified service status but failed to set flags" + else: + error_message = "rcctl failed to modify service flags" + + self.module.fail_json(msg=error_message) + + self.changed = True # =========================================== # Subclass: NetBSD diff --git a/system/sysctl.py b/system/sysctl.py index 3cf29f9a32b..4517c724ca9 100644 --- a/system/sysctl.py +++ b/system/sysctl.py @@ -185,12 +185,20 @@ class SysctlModule(object): def _parse_value(self, value): if value is None: return '' - elif value.lower() in BOOLEANS_TRUE: - return '1' - elif value.lower() in BOOLEANS_FALSE: - return '0' + elif isinstance(value, bool): + if value: + return '1' + else: + return '0' + elif isinstance(value, basestring): + if value.lower() in BOOLEANS_TRUE: + return '1' + elif value.lower() in BOOLEANS_FALSE: + return '0' + else: + return value.strip() else: - return value.strip() + return value # ============================================================== # SYSCTL COMMAND MANAGEMENT diff --git a/system/user.py b/system/user.py old mode 100644 new mode 100755 index 30ae29d30ae..046daf20769 --- a/system/user.py +++ b/system/user.py @@ -81,13 +81,14 @@ options: the user example in the github examples directory for what this looks like in a playbook. The `FAQ `_ contains details on various ways to generate these password values. + Note on Darwin system, this value has to be cleartext. + Beware of security issues. state: required: false default: "present" choices: [ present, absent ] description: - - Whether the account should exist. When C(absent), removes - the user account. + - Whether the account should exist or not, taking action if the state is different from what is stated. createhome: required: false default: "yes" @@ -95,7 +96,7 @@ options: description: - Unless set to C(no), a home directory will be made for the user when the account is created or if the home directory does not - exist. + exist. move_home: required: false default: "no" @@ -153,13 +154,14 @@ options: present on target host. 
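(An aside on the sysctl `_parse_value` change shown above: the point is to accept real YAML booleans as well as boolean-looking strings. A rough standalone equivalent, with illustrative truth lists rather than Ansible's exact BOOLEANS_* constants, and `str` standing in for `basestring` on Python 2:)

    def parse_sysctl_value(value):
        # Illustrative only; Ansible's BOOLEANS_TRUE/BOOLEANS_FALSE
        # constants contain a few more spellings.
        truthy, falsy = ('yes', 'on', 'true', '1'), ('no', 'off', 'false', '0')
        if value is None:
            return ''
        if isinstance(value, bool):
            return '1' if value else '0'
        if isinstance(value, str):  # basestring on Python 2
            if value.lower() in truthy:
                return '1'
            if value.lower() in falsy:
                return '0'
            return value.strip()
        return value  # leave ints and other types untouched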
    ssh_key_file:
        required: false
-        default: $HOME/.ssh/id_rsa
+        default: .ssh/id_rsa
        version_added: "0.9"
        description:
-            - Optionally specify the SSH key filename.
+            - Optionally specify the SSH key filename. If this is a relative
+              filename then it will be relative to the user's home directory.
    ssh_key_comment:
        required: false
-        default: ansible-generated
+        default: ansible-generated on $HOSTNAME
        version_added: "0.9"
        description:
            - Optionally define the comment for the SSH key.
@@ -177,6 +179,13 @@ options:
        version_added: "1.3"
        description:
            - C(always) will update passwords if they differ.  C(on_create) will only set the password for newly created users.
+    expires:
+        version_added: "1.9"
+        required: false
+        default: "None"
+        description:
+            - An expiry time for the user in epoch time; it will be ignored on platforms that do not support this.
+              Currently supported on Linux and FreeBSD.
 '''

 EXAMPLES = '''
@@ -189,8 +198,11 @@
 # Remove the user 'johnd'
 - user: name=johnd state=absent remove=yes

-# Create a 2048-bit SSH key for user jsmith
-- user: name=jsmith generate_ssh_key=yes ssh_key_bits=2048
+# Create a 2048-bit SSH key for user jsmith in ~jsmith/.ssh/id_rsa
+- user: name=jsmith generate_ssh_key=yes ssh_key_bits=2048 ssh_key_file=.ssh/id_rsa
+
+# Add a consultant whose account you want to expire
+- user: name=james18 shell=/bin/zsh groups=developers expires=1422403387
 '''

 import os
@@ -198,6 +210,8 @@ import pwd
 import grp
 import syslog
 import platform
+import socket
+import time

 try:
     import spwd
@@ -225,6 +239,7 @@ class User(object):
     platform = 'Generic'
     distribution = None
     SHADOWFILE = '/etc/shadow'
+    DATE_FORMAT = '%Y-%m-%d'

     def __new__(cls, *args, **kwargs):
         return load_platform_subclass(User, args, kwargs)
@@ -254,6 +269,14 @@ class User(object):
         self.ssh_comment = module.params['ssh_key_comment']
         self.ssh_passphrase = module.params['ssh_key_passphrase']
         self.update_password = module.params['update_password']
+        self.expires = None
+
+        if module.params['expires']:
+            try:
+                self.expires = time.gmtime(module.params['expires'])
+            except Exception,e:
+                module.fail_json(msg="Invalid expires time %s: %s" % (module.params['expires'], str(e)))
+
         if module.params['ssh_key_file'] is not None:
             self.ssh_file = module.params['ssh_key_file']
         else:
@@ -262,12 +285,13 @@ class User(object):
         # select whether we dump additional debug info through syslog
         self.syslogging = False

-    def execute_command(self, cmd, use_unsafe_shell=False):
+
+    def execute_command(self, cmd, use_unsafe_shell=False, data=None):
         if self.syslogging:
             syslog.openlog('ansible-%s' % os.path.basename(__file__))
             syslog.syslog(syslog.LOG_NOTICE, 'Command %s' % '|'.join(cmd))

-        return self.module.run_command(cmd, use_unsafe_shell=use_unsafe_shell)
+        return self.module.run_command(cmd, use_unsafe_shell=use_unsafe_shell, data=data)

     def remove_user_userdel(self):
         cmd = [self.module.get_bin_path('userdel', True)]
@@ -326,6 +350,10 @@ class User(object):
             cmd.append('-s')
             cmd.append(self.shell)

+        if self.expires:
+            cmd.append('--expiredate')
+            cmd.append(time.strftime(self.DATE_FORMAT, self.expires))
+
         if self.password is not None:
             cmd.append('-p')
             cmd.append(self.password)
@@ -432,6 +460,10 @@ class User(object):
             cmd.append('-s')
             cmd.append(self.shell)

+        if self.expires:
+            cmd.append('--expiredate')
+            cmd.append(time.strftime(self.DATE_FORMAT, self.expires))
+
         if self.update_password == 'always' and self.password is not None and info[1] != self.password:
             cmd.append('-p')
             cmd.append(self.password)
@@ -536,7 +568,7 @@
         if not
os.path.exists(info[5]): return (1, '', 'User %s home directory does not exist' % self.name) ssh_key_file = self.get_ssh_key_path() - ssh_dir = os.path.dirname(ssh_key_file) + ssh_dir = os.path.dirname(ssh_key_file) if not os.path.exists(ssh_dir): try: os.mkdir(ssh_dir, 0700) @@ -625,7 +657,7 @@ class User(object): os.chown(os.path.join(root, f), uid, gid) except OSError, e: self.module.exit_json(failed=True, msg="%s" % e) - + # =========================================== @@ -702,6 +734,11 @@ class FreeBsdUser(User): cmd.append('-L') cmd.append(self.login_class) + if self.expires: + days =( time.mktime(self.expires) - time.time() ) / 86400 + cmd.append('-e') + cmd.append(str(int(days))) + # system cannot be handled currently - should we error if its requested? # create the user (rc, out, err) = self.execute_command(cmd) @@ -714,7 +751,7 @@ class FreeBsdUser(User): self.module.get_bin_path('chpass', True), '-p', self.password, - self.name + self.name ] return self.execute_command(cmd) @@ -725,7 +762,7 @@ class FreeBsdUser(User): self.module.get_bin_path('pw', True), 'usermod', '-n', - self.name + self.name ] cmd_len = len(cmd) info = self.user_info() @@ -760,8 +797,17 @@ class FreeBsdUser(User): cmd.append(self.shell) if self.login_class is not None: - cmd.append('-L') - cmd.append(self.login_class) + # find current login class + user_login_class = None + if os.path.exists(self.SHADOWFILE) and os.access(self.SHADOWFILE, os.R_OK): + for line in open(self.SHADOWFILE).readlines(): + if line.startswith('%s:' % self.name): + user_login_class = line.split(':')[4] + + # act only if login_class change + if self.login_class != user_login_class: + cmd.append('-L') + cmd.append(self.login_class) if self.groups is not None: current_groups = self.user_group_membership() @@ -786,6 +832,11 @@ class FreeBsdUser(User): new_groups = groups | set(current_groups) cmd.append(','.join(new_groups)) + if self.expires: + days = ( time.mktime(self.expires) - time.time() ) / 86400 + cmd.append('-e') + cmd.append(str(int(days))) + # modify the user if cmd will do anything if cmd_len != len(cmd): (rc, out, err) = self.execute_command(cmd) @@ -1255,7 +1306,7 @@ class SunOS(User): cmd.append('-G') new_groups = groups if self.append: - new_groups.extend(current_groups) + new_groups.update(current_groups) cmd.append(','.join(new_groups)) if self.comment is not None and info[4] != self.comment: @@ -1304,6 +1355,321 @@ class SunOS(User): return (rc, out, err) +# =========================================== +class DarwinUser(User): + """ + This is a Darwin Mac OS X User manipulation class. + Main differences are that Darwin:- + - Handles accounts in a database managed by dscl(1) + - Has no useradd/groupadd + - Does not create home directories + - User password must be cleartext + - UID must be given + - System users must ben under 500 + + This overrides the following methods from the generic class:- + - user_exists() + - create_user() + - remove_user() + - modify_user() + """ + platform = 'Darwin' + distribution = None + SHADOWFILE = None + + dscl_directory = '.' 
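(An aside on the dscl(1) plumbing this class is built on: a single-property read prints `PROPERTY: value` and exits non-zero for unknown records, which is what `_get_user_property` below parses. A rough standalone illustration with a hypothetical helper name, multi-line values elided:)

    import subprocess

    def read_dscl_property(user, prop, directory='.'):
        # e.g. read_dscl_property('alice', 'UserShell') -> '/bin/bash'
        p = subprocess.Popen(
            ['dscl', directory, '-read', '/Users/%s' % user, prop],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, _ = p.communicate()
        if p.returncode != 0:
            return None
        lines = out.splitlines()
        if lines and ': ' in lines[0]:
            return lines[0].split(': ', 1)[1]
        return None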
+
+    fields = [
+        ('comment', 'RealName'),
+        ('home', 'NFSHomeDirectory'),
+        ('shell', 'UserShell'),
+        ('uid', 'UniqueID'),
+        ('group', 'PrimaryGroupID'),
+    ]
+
+    def _get_dscl(self):
+        return [ self.module.get_bin_path('dscl', True), self.dscl_directory ]
+
+    def _list_user_groups(self):
+        cmd = self._get_dscl()
+        cmd += [ '-search', '/Groups', 'GroupMembership', self.name ]
+        (rc, out, err) = self.execute_command(cmd)
+        groups = []
+        for line in out.splitlines():
+            if line.startswith(' ') or line.startswith(')'):
+                continue
+            groups.append(line.split()[0])
+        return groups
+
+    def _get_user_property(self, property):
+        '''Return user PROPERTY as given by dscl(1) read or None if not found.'''
+        cmd = self._get_dscl()
+        cmd += [ '-read', '/Users/%s' % self.name, property ]
+        (rc, out, err) = self.execute_command(cmd)
+        if rc != 0:
+            return None
+        # from dscl(1)
+        # if property contains embedded spaces, the list will instead be
+        # displayed one entry per line, starting on the line after the key.
+        lines = out.splitlines()
+        #sys.stderr.write('*** |%s| %s -> %s\n' % (property, out, lines))
+        if len(lines) == 1:
+            return lines[0].split(': ')[1]
+        else:
+            if len(lines) > 2:
+                return '\n'.join([ lines[1].strip() ] + lines[2:])
+            else:
+                if len(lines) == 2:
+                    return lines[1].strip()
+                else:
+                    return None
+
+    def _change_user_password(self):
+        '''Change password for SELF.NAME against SELF.PASSWORD.
+
+        Please note that the password must be cleartext.
+        '''
+        # some documentation on how passwords are stored on OSX:
+        # http://blog.lostpassword.com/2012/07/cracking-mac-os-x-lion-accounts-passwords/
+        # http://null-byte.wonderhowto.com/how-to/hack-mac-os-x-lion-passwords-0130036/
+        # http://pastebin.com/RYqxi7Ca
+        # on OSX 10.8+ hash is SALTED-SHA512-PBKDF2
+        # https://pythonhosted.org/passlib/lib/passlib.hash.pbkdf2_digest.html
+        # https://gist.github.com/nueh/8252572
+        cmd = self._get_dscl()
+        if self.password:
+            cmd += [ '-passwd', '/Users/%s' % self.name, self.password]
+        else:
+            cmd += [ '-create', '/Users/%s' % self.name, 'Password', '*']
+        (rc, out, err) = self.execute_command(cmd)
+        if rc != 0:
+            self.module.fail_json(msg='Error when changing password',
+                                  err=err, out=out, rc=rc)
+        return (rc, out, err)
+
+    def _make_group_numerical(self):
+        '''Convert SELF.GROUP to its numerical gid, as a string suitable for dscl.'''
+        if self.group is not None:
+            try:
+                self.group = grp.getgrnam(self.group).gr_gid
+            except KeyError:
+                self.module.fail_json(msg='Group "%s" not found. Try to create it first using "group" module.' % self.group)
+            # We need to pass a string to dscl
+            self.group = str(self.group)
+
+    def __modify_group(self, group, action):
+        '''Add or remove SELF.NAME to or from GROUP depending on ACTION.
+        ACTION can be 'add' or 'remove', otherwise 'remove' is assumed. '''
+        if action == 'add':
+            option = '-a'
+        else:
+            option = '-d'
+        cmd = [ 'dseditgroup', '-o', 'edit', option, self.name,
+                '-t', 'user', group ]
+        (rc, out, err) = self.execute_command(cmd)
+        if rc != 0:
+            self.module.fail_json(msg='Cannot %s user "%s" to group "%s".'
+                                  % (action, self.name, group),
+                                  err=err, out=out, rc=rc)
+        return (rc, out, err)
+
+    def _modify_group(self):
+        '''Synchronize SELF.NAME's group membership with SELF.GROUPS:
+        add the user to missing groups and remove it from groups not listed.
+        '''
+
+        rc = 0
+        out = ''
+        err = ''
+        changed = False
+
+        current = set(self._list_user_groups())
+        if self.groups is not None:
+            target = set(self.groups.split(','))
+        else:
+            target = set([])
+
+        for remove in current - target:
+            (_rc, _out, _err) = self.__modify_group(remove, 'remove')
+            rc += _rc
+            out += _out
+            err += _err
+            changed = True
+
+        for add in target - current:
+            (_rc, _out, _err) = self.__modify_group(add, 'add')
+            rc += _rc
+            out += _out
+            err += _err
+            changed = True
+
+        return (rc, out, err, changed)
+
+    def _update_system_user(self):
+        '''Hide or show user on login window according to SELF.SYSTEM.
+
+        Returns 0 if a change has been made, None otherwise.'''
+
+        plist_file = '/Library/Preferences/com.apple.loginwindow.plist'
+
+        # http://support.apple.com/kb/HT5017?viewlocale=en_US
+        uid = int(self.uid)
+        cmd = [ 'defaults', 'read', plist_file, 'HiddenUsersList' ]
+        (rc, out, err) = self.execute_command(cmd)
+        # returned value is
+        # (
+        #   "_userA",
+        #   "_UserB",
+        #   userc
+        # )
+        hidden_users = []
+        for x in out.splitlines()[1:-1]:
+            try:
+                x = x.split('"')[1]
+            except IndexError:
+                x = x.strip()
+            hidden_users.append(x)
+
+        if self.system:
+            if not self.name in hidden_users:
+                cmd = [ 'defaults', 'write', plist_file,
+                        'HiddenUsersList', '-array-add', self.name ]
+                (rc, out, err) = self.execute_command(cmd)
+                if rc != 0:
+                    self.module.fail_json(
+                        msg='Cannot add user "%s" to hidden user list.'
+                        % self.name, err=err, out=out, rc=rc)
+                return 0
+        else:
+            if self.name in hidden_users:
+                del(hidden_users[hidden_users.index(self.name)])
+
+                cmd = [ 'defaults', 'write', plist_file,
+                        'HiddenUsersList', '-array' ] + hidden_users
+                (rc, out, err) = self.execute_command(cmd)
+                if rc != 0:
+                    self.module.fail_json(
+                        msg='Cannot remove user "%s" from hidden user list.'
+                        % self.name, err=err, out=out, rc=rc)
+                return 0
+
+    def user_exists(self):
+        '''Check if SELF.NAME is a known user on the system.'''
+        cmd = self._get_dscl()
+        cmd += [ '-list', '/Users/%s' % self.name]
+        (rc, out, err) = self.execute_command(cmd)
+        return rc == 0
+
+    def remove_user(self):
+        '''Delete SELF.NAME. If SELF.FORCE is true, remove its home directory.'''
+        info = self.user_info()
+
+        cmd = self._get_dscl()
+        cmd += [ '-delete', '/Users/%s' % self.name]
+        (rc, out, err) = self.execute_command(cmd)
+
+        if rc != 0:
+            self.module.fail_json(
+                msg='Cannot delete user "%s".'
+                % self.name, err=err, out=out, rc=rc)
+
+        if self.force:
+            if os.path.exists(info[5]):
+                shutil.rmtree(info[5])
+                out += "Removed %s" % info[5]
+
+        return (rc, out, err)
+
+    def create_user(self, command_name='dscl'):
+        cmd = self._get_dscl()
+        cmd += [ '-create', '/Users/%s' % self.name]
+        (rc, out, err) = self.execute_command(cmd)
+        if rc != 0:
+            self.module.fail_json(
+                msg='Cannot create user "%s".'
+                % self.name, err=err, out=out, rc=rc)
+
+        self._make_group_numerical()
+
+        # Homedir is not created by default
+        if self.createhome:
+            if self.home is None:
+                self.home = '/Users/%s' % self.name
+            if not os.path.exists(self.home):
+                os.makedirs(self.home)
+            self.chown_homedir(int(self.uid), int(self.group), self.home)
+
+        for field in self.fields:
+            if self.__dict__.has_key(field[0]) and self.__dict__[field[0]]:
+
+                cmd = self._get_dscl()
+                cmd += [ '-create', '/Users/%s' % self.name,
+                         field[1], self.__dict__[field[0]]]
+                (rc, _out, _err) = self.execute_command(cmd)
+                if rc != 0:
+                    self.module.fail_json(
+                        msg='Cannot add property "%s" to user "%s".'
+                        % (field[0], self.name), err=err, out=out, rc=rc)
+
+                out += _out
+                err += _err
+                if rc != 0:
+                    return (rc, _out, _err)
+
+
+        (rc, _out, _err) = self._change_user_password()
+        out += _out
+        err += _err
+
+        self._update_system_user()
+        # here we don't care about change status since it is a creation,
+        # thus changed is always true.
+        (rc, _out, _err, changed) = self._modify_group()
+        out += _out
+        err += _err
+        return (rc, out, err)
+
+    def modify_user(self):
+        changed = None
+        out = ''
+        err = ''
+
+        self._make_group_numerical()
+
+        for field in self.fields:
+            if self.__dict__.has_key(field[0]) and self.__dict__[field[0]]:
+                current = self._get_user_property(field[1])
+                if current is None or current != self.__dict__[field[0]]:
+                    cmd = self._get_dscl()
+                    cmd += [ '-create', '/Users/%s' % self.name,
+                             field[1], self.__dict__[field[0]]]
+                    (rc, _out, _err) = self.execute_command(cmd)
+                    if rc != 0:
+                        self.module.fail_json(
+                            msg='Cannot update property "%s" for user "%s".'
+                            % (field[0], self.name), err=err, out=out, rc=rc)
+                    changed = rc
+                    out += _out
+                    err += _err
+        if self.update_password == 'always':
+            (rc, _out, _err) = self._change_user_password()
+            out += _out
+            err += _err
+            changed = rc
+
+        (rc, _out, _err, _changed) = self._modify_group()
+        out += _out
+        err += _err
+
+        if _changed is True:
+            changed = rc
+
+        rc = self._update_system_user()
+        if rc == 0:
+            changed = rc
+
+        return (changed, out, err)
+
 # ===========================================

 class AIX(User):
@@ -1367,11 +1733,10 @@ class AIX(User):
         # set password with chpasswd
         if self.password is not None:
             cmd = []
-            cmd.append('echo \''+self.name+':'+self.password+'\' |')
             cmd.append(self.module.get_bin_path('chpasswd', True))
             cmd.append('-e')
             cmd.append('-c')
-            self.execute_command(' '.join(cmd), use_unsafe_shell=True)
+            self.execute_command(' '.join(cmd), data="%s:%s" % (self.name, self.password))

         return (rc, out, err)

@@ -1430,7 +1795,6 @@ class AIX(User):
             cmd.append('-s')
             cmd.append(self.shell)
-
         # skip if no changes to be made
         if len(cmd) == 1:
             (rc, out, err) = (None, '', '')
@@ -1443,11 +1807,10 @@ class AIX(User):
         # set password with chpasswd
         if self.update_password == 'always' and self.password is not None and info[1] != self.password:
             cmd = []
-            cmd.append('echo \''+self.name+':'+self.password+'\' |')
             cmd.append(self.module.get_bin_path('chpasswd', True))
             cmd.append('-e')
             cmd.append('-c')
-            (rc2, out2, err2) = self.execute_command(' '.join(cmd), use_unsafe_shell=True)
+            (rc2, out2, err2) = self.execute_command(' '.join(cmd), data="%s:%s" % (self.name, self.password))
         else:
             (rc2, out2, err2) = (None, '', '')

@@ -1458,12 +1821,164 @@ class AIX(User):

 # ===========================================

+class HPUX(User):
+    """
+    This is an HP-UX User manipulation class.
+ + This overrides the following methods from the generic class:- + - create_user() + - remove_user() + - modify_user() + """ + + platform = 'HP-UX' + distribution = None + SHADOWFILE = '/etc/shadow' + + def create_user(self): + cmd = ['/usr/sam/lbin/useradd.sam'] + + if self.uid is not None: + cmd.append('-u') + cmd.append(self.uid) + + if self.non_unique: + cmd.append('-o') + + if self.group is not None: + if not self.group_exists(self.group): + self.module.fail_json(msg="Group %s does not exist" % self.group) + cmd.append('-g') + cmd.append(self.group) + + if self.groups is not None and len(self.groups): + groups = self.get_groups_set() + cmd.append('-G') + cmd.append(','.join(groups)) + + if self.comment is not None: + cmd.append('-c') + cmd.append(self.comment) + + if self.home is not None: + cmd.append('-d') + cmd.append(self.home) + + if self.shell is not None: + cmd.append('-s') + cmd.append(self.shell) + + if self.password is not None: + cmd.append('-p') + cmd.append(self.password) + + if self.createhome: + cmd.append('-m') + else: + cmd.append('-M') + + if self.system: + cmd.append('-r') + + cmd.append(self.name) + return self.execute_command(cmd) + + def remove_user(self): + cmd = ['/usr/sam/lbin/userdel.sam'] + if self.force: + cmd.append('-F') + if self.remove: + cmd.append('-r') + cmd.append(self.name) + return self.execute_command(cmd) + + def modify_user(self): + cmd = ['/usr/sam/lbin/usermod.sam'] + info = self.user_info() + has_append = self._check_usermod_append() + + if self.uid is not None and info[2] != int(self.uid): + cmd.append('-u') + cmd.append(self.uid) + + if self.non_unique: + cmd.append('-o') + + if self.group is not None: + if not self.group_exists(self.group): + self.module.fail_json(msg="Group %s does not exist" % self.group) + ginfo = self.group_info(self.group) + if info[3] != ginfo[2]: + cmd.append('-g') + cmd.append(self.group) + + if self.groups is not None: + current_groups = self.user_group_membership() + groups_need_mod = False + groups = [] + + if self.groups == '': + if current_groups and not self.append: + groups_need_mod = True + else: + groups = self.get_groups_set(remove_existing=False) + group_diff = set(current_groups).symmetric_difference(groups) + + if group_diff: + if self.append: + for g in groups: + if g in group_diff: + if has_append: + cmd.append('-a') + groups_need_mod = True + break + else: + groups_need_mod = True + + if groups_need_mod: + if self.append and not has_append: + cmd.append('-A') + cmd.append(','.join(group_diff)) + else: + cmd.append('-G') + cmd.append(','.join(groups)) + + + if self.comment is not None and info[4] != self.comment: + cmd.append('-c') + cmd.append(self.comment) + + if self.home is not None and info[5] != self.home: + cmd.append('-d') + cmd.append(self.home) + if self.move_home: + cmd.append('-m') + + if self.shell is not None and info[6] != self.shell: + cmd.append('-s') + cmd.append(self.shell) + + if self.update_password == 'always' and self.password is not None and info[1] != self.password: + cmd.append('-p') + cmd.append(self.password) + + # skip if no changes to be made + if len(cmd) == 1: + return (None, '', '') + elif self.module.check_mode: + return (0, '', '') + + cmd.append(self.name) + return self.execute_command(cmd) + +# =========================================== + def main(): ssh_defaults = { 'bits': '2048', 'type': 'rsa', 'passphrase': None, - 'comment': 'ansible-generated' + 'comment': 'ansible-generated on %s' % socket.gethostname() } module = AnsibleModule( argument_spec = dict( @@ 
-1494,7 +2009,8 @@ def main(): ssh_key_file=dict(default=None, type='str'), ssh_key_comment=dict(default=ssh_defaults['comment'], type='str'), ssh_key_passphrase=dict(default=None, type='str'), - update_password=dict(default='always',choices=['always','on_create'],type='str') + update_password=dict(default='always',choices=['always','on_create'],type='str'), + expires=dict(default=None, type='float'), ), supports_check_mode=True ) @@ -1562,6 +2078,16 @@ def main(): if user.groups is not None: result['groups'] = user.groups + # handle missing homedirs + info = user.user_info() + if user.home is None: + user.home = info[5] + if not os.path.exists(user.home) and user.createhome: + if not module.check_mode: + user.create_homedir(user.home) + user.chown_homedir(info[2], info[3], user.home) + result['changed'] = True + # deal with ssh key if user.sshkeygen: (rc, out, err) = user.ssh_key_gen() @@ -1577,16 +2103,6 @@ def main(): result['ssh_key_file'] = user.get_ssh_key_path() result['ssh_public_key'] = user.get_ssh_public_key() - # handle missing homedirs - info = user.user_info() - if user.home is None: - user.home = info[5] - if not os.path.exists(user.home) and user.createhome: - if not module.check_mode: - user.create_homedir(user.home) - user.chown_homedir(info[2], info[3], user.home) - result['changed'] = True - module.exit_json(**result) # import module snippets diff --git a/utilities/logic/wait_for.py b/utilities/logic/wait_for.py index ae316fe1a17..0c340751e73 100644 --- a/utilities/logic/wait_for.py +++ b/utilities/logic/wait_for.py @@ -54,7 +54,7 @@ version_added: "0.7" options: host: description: - - hostname or IP address to wait for + - A resolvable hostname or IP address to wait for required: false default: "127.0.0.1" aliases: [] @@ -63,6 +63,11 @@ options: - maximum number of seconds to wait for required: false default: 300 + connect_timeout: + description: + - maximum number of seconds to wait for a connection to happen before closing and retrying + required: false + default: 5 delay: description: - number of seconds to wait before starting to poll @@ -123,8 +128,9 @@ EXAMPLES = ''' # wait until the process is finished and pid was destroyed - wait_for: path=/proc/3466/status state=absent -# Wait 300 seconds for port 22 to become open and contain "OpenSSH", don't start checking for 10 seconds -- local_action: wait_for port=22 host="{{ inventory_hostname }}" search_regex=OpenSSH delay=10 +# wait 300 seconds for port 22 to become open and contain "OpenSSH", don't assume the inventory_hostname is resolvable +# and don't start checking for 10 seconds +- local_action: wait_for port=22 host="{{ ansible_ssh_host | default(inventory_hostname) }}" search_regex=OpenSSH delay=10 ''' diff --git a/web_infrastructure/apache2_module.py b/web_infrastructure/apache2_module.py index 39351482087..817e782aa76 100644 --- a/web_infrastructure/apache2_module.py +++ b/web_infrastructure/apache2_module.py @@ -49,9 +49,12 @@ import re def _disable_module(module): name = module.params['name'] a2dismod_binary = module.get_bin_path("a2dismod") + if a2dismod_binary is None: + module.fail_json(msg="a2dismod not found. 
Perhaps this system does not use a2dismod to manage apache") + result, stdout, stderr = module.run_command("%s %s" % (a2dismod_binary, name)) - if re.match(r'.*' + name + r' already disabled.*', stdout, re.S): + if re.match(r'.*\b' + name + r' already disabled', stdout, re.S): module.exit_json(changed = False, result = "Success") elif result != 0: module.fail_json(msg="Failed to disable module %s: %s" % (name, stdout)) @@ -61,9 +64,12 @@ def _disable_module(module): def _enable_module(module): name = module.params['name'] a2enmod_binary = module.get_bin_path("a2enmod") + if a2enmod_binary is None: + module.fail_json(msg="a2enmod not found. Perhaps this system does not use a2enmod to manage apache") + result, stdout, stderr = module.run_command("%s %s" % (a2enmod_binary, name)) - if re.match(r'.*' + name + r' already enabled.*', stdout, re.S): + if re.match(r'.*\b' + name + r' already enabled', stdout, re.S): module.exit_json(changed = False, result = "Success") elif result != 0: module.fail_json(msg="Failed to enable module %s: %s" % (name, stdout)) @@ -86,4 +92,5 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/windows/win_copy.ps1 b/windows/win_copy.ps1 deleted file mode 100644 index 9ffdab85f03..00000000000 --- a/windows/win_copy.ps1 +++ /dev/null @@ -1,84 +0,0 @@ -#!powershell -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# WANT_JSON -# POWERSHELL_COMMON - -$params = Parse-Args $args; - -$src= Get-Attr $params "src" $FALSE; -If ($src -eq $FALSE) -{ - Fail-Json (New-Object psobject) "missing required argument: src"; -} - -$dest= Get-Attr $params "dest" $FALSE; -If ($dest -eq $FALSE) -{ - Fail-Json (New-Object psobject) "missing required argument: dest"; -} - -# seems to be supplied by the calling environment, but -# probably shouldn't be a test for it existing in the params. -# TODO investigate. -$original_basename = Get-Attr $params "original_basename" $FALSE; -If ($original_basename -eq $FALSE) -{ - Fail-Json (New-Object psobject) "missing required argument: original_basename "; -} - -$result = New-Object psobject @{ - changed = $FALSE -}; - -# if $dest is a dir, append $original_basename so the file gets copied with its intended name. -if (Test-Path $dest -PathType Container) -{ - $dest = Join-Path $dest $original_basename; -} - -If (Test-Path $dest) -{ - $dest_checksum = Get-FileChecksum ($dest); - $src_checksum = Get-FileChecksum ($src); - - If (! 
$src_checksum.CompareTo($dest_checksum)) - { - # New-Item -Force creates subdirs for recursive copies - New-Item -Force $dest -Type file; - Copy-Item -Path $src -Destination $dest -Force; - } - $dest_checksum = Get-FileChecksum ($dest); - If ( $src_checksum.CompareTo($dest_checksum)) - { - $result.changed = $TRUE; - } - Else - { - Fail-Json (New-Object psobject) "Failed to place file"; - } -} -Else -{ - New-Item -Force $dest -Type file; - Copy-Item -Path $src -Destination $dest; - $result.changed = $TRUE; -} - -$dest_checksum = Get-FileChecksum($dest); -$result.checksum = $dest_checksum; - -Exit-Json $result; diff --git a/windows/win_copy.py b/windows/win_copy.py deleted file mode 100644 index 7d0b49e5985..00000000000 --- a/windows/win_copy.py +++ /dev/null @@ -1,60 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -import os -import time - -DOCUMENTATION = ''' ---- -module: win_copy -version_added: "1.8" -short_description: Copies files to remote locations on windows hosts. -description: - - The M(win_copy) module copies a file on the local box to remote windows locations. -options: - src: - description: - - Local path to a file to copy to the remote server; can be absolute or relative. - If path is a directory, it is copied recursively. In this case, if path ends - with "/", only inside contents of that directory are copied to destination. - Otherwise, if it does not end with "/", the directory itself with all contents - is copied. This behavior is similar to Rsync. - required: false - default: null - aliases: [] - dest: - description: - - Remote absolute path where the file should be copied to. If src is a directory, - this must be a directory too. Use \\ for path separators. - required: true - default: null -author: Michael DeHaan -notes: - - The "win_copy" module recursively copy facility does not scale to lots (>hundreds) of files. - Instead, you may find it better to create files locally, perhaps using win_template, and - then use win_get_url to put them in the correct location. -''' - -EXAMPLES = ''' -# Example from Ansible Playbooks -- win_copy: src=/srv/myfiles/foo.conf dest=c:\\TEMP\\foo.conf - -''' - diff --git a/windows/win_file.ps1 b/windows/win_file.ps1 deleted file mode 100644 index 62ac81fc1ee..00000000000 --- a/windows/win_file.ps1 +++ /dev/null @@ -1,105 +0,0 @@ -#!powershell -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# WANT_JSON -# POWERSHELL_COMMON - -$params = Parse-Args $args; - -# path -$path = Get-Attr $params "path" $FALSE; -If ($path -eq $FALSE) -{ - $path = Get-Attr $params "dest" $FALSE; - If ($path -eq $FALSE) - { - $path = Get-Attr $params "name" $FALSE; - If ($path -eq $FALSE) - { - Fail-Json (New-Object psobject) "missing required argument: path"; - } - } -} - -# JH Following advice from Chris Church, only allow the following states -# in the windows version for now: -# state - file, directory, touch, absent -# (originally was: state - file, link, directory, hard, touch, absent) - -$state = Get-Attr $params "state" "file"; - -#$recurse = Get-Attr $params "recurse" "no"; - -# force - yes, no -# $force = Get-Attr $params "force" "no"; - -# result -$result = New-Object psobject @{ - changed = $FALSE -}; - -If ( $state -eq "touch" ) -{ - If(Test-Path $path) - { - (Get-ChildItem $path).LastWriteTime = Get-Date - } - Else - { - echo $null > $file - } - $result.changed = $TRUE; -} - -If (Test-Path $path) -{ - $fileinfo = Get-Item $path; - If ( $state -eq "absent" ) - { - Remove-Item -Recurse -Force $fileinfo; - $result.changed = $TRUE; - } - Else - { - # Only files have the .Directory attribute. - If ( $state -eq "directory" -and $fileinfo.Directory ) - { - Fail-Json (New-Object psobject) "path is not a directory"; - } - - # Only files have the .Directory attribute. - If ( $state -eq "file" -and -not $fileinfo.Directory ) - { - Fail-Json (New-Object psobject) "path is not a file"; - } - - } -} -Else -{ - If ( $state -eq "directory" ) - { - New-Item -ItemType directory -Path $path - $result.changed = $TRUE; - } - - If ( $state -eq "file" ) - { - Fail-Json (New-Object psobject) "path will not be created"; - } -} - -Exit-Json $result; diff --git a/windows/win_file.py b/windows/win_file.py deleted file mode 100644 index 6a218216617..00000000000 --- a/windows/win_file.py +++ /dev/null @@ -1,73 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - - -DOCUMENTATION = ''' ---- -module: win_file -version_added: "1.8" -short_description: Creates, touches or removes files or directories. -extends_documentation_fragment: files -description: - - Creates (empty) files, updates file modification stamps of existing files, - and can create or remove directories. - Unlike M(file), does not modify ownership, permissions or manipulate links. -notes: - - See also M(win_copy), M(win_template), M(copy), M(template), M(assemble) -requirements: [ ] -author: Michael DeHaan -options: - path: - description: - - 'path to the file being managed. Aliases: I(dest), I(name)' - required: true - default: [] - aliases: ['dest', 'name'] - state: - description: - - If C(directory), all immediate subdirectories will be created if they - do not exist. 
- If C(file), the file will NOT be created if it does not exist, see the M(copy) - or M(template) module if you want that behavior. If C(absent), - directories will be recursively deleted, and files will be removed. - If C(touch), an empty file will be created if the c(path) does not - exist, while an existing file or directory will receive updated file access and - modification times (similar to the way `touch` works from the command line). - required: false - default: file - choices: [ file, directory, touch, absent ] -''' - -EXAMPLES = ''' -# create a file -- win_file: path=C:\\temp\\foo.conf - -# touch a file (creates if not present, updates modification time if present) -- win_file: path=C:\\temp\\foo.conf state=touch - -# remove a file, if present -- win_file: path=C:\\temp\\foo.conf state=absent - -# create directory structure -- win_file: path=C:\\temp\\folder\\subfolder state=directory - -# remove directory structure -- win_file: path=C:\\temp state=absent -''' diff --git a/windows/win_stat.ps1 b/windows/win_stat.ps1 index 10101a62b30..4e4c55b2aa3 100644 --- a/windows/win_stat.ps1 +++ b/windows/win_stat.ps1 @@ -53,9 +53,11 @@ Else If ($get_md5 -and $result.stat.exists -and -not $result.stat.isdir) { - $hash = Get-FileChecksum($path); + $sp = new-object -TypeName System.Security.Cryptography.MD5CryptoServiceProvider; + $fp = [System.IO.File]::Open($path, [System.IO.Filemode]::Open, [System.IO.FileAccess]::Read); + $hash = [System.BitConverter]::ToString($sp.ComputeHash($fp)).Replace("-", "").ToLower(); + $fp.Dispose(); Set-Attr $result.stat "md5" $hash; - Set-Attr $result.stat "checksum" $hash; } Exit-Json $result; diff --git a/windows/win_template.py b/windows/win_template.py deleted file mode 100644 index 402702f93b2..00000000000 --- a/windows/win_template.py +++ /dev/null @@ -1,52 +0,0 @@ -# this is a virtual module that is entirely implemented server side - -DOCUMENTATION = ''' ---- -module: win_template -version_added: 1.8 -short_description: Templates a file out to a remote server. -description: - - Templates are processed by the Jinja2 templating language - (U(http://jinja.pocoo.org/docs/)) - documentation on the template - formatting can be found in the Template Designer Documentation - (U(http://jinja.pocoo.org/docs/templates/)). - - "Six additional variables can be used in templates: C(ansible_managed) - (configurable via the C(defaults) section of C(ansible.cfg)) contains a string - which can be used to describe the template name, host, modification time of the - template file and the owner uid, C(template_host) contains the node name of - the template's machine, C(template_uid) the owner, C(template_path) the - absolute path of the template, C(template_fullpath) is the absolute path of the - template, and C(template_run_date) is the date that the template was rendered. Note that including - a string that uses a date in the template will result in the template being marked 'changed' - each time." -options: - src: - description: - - Path of a Jinja2 formatted template on the local server. This can be a relative or absolute path. - required: true - default: null - aliases: [] - dest: - description: - - Location to render the template to on the remote machine. - required: true - default: null - backup: - description: - - Create a backup file including the timestamp information so you can get - the original file back if you somehow clobbered it incorrectly. 
- required: false - choices: [ "yes", "no" ] - default: "no" -notes: - - "templates are loaded with C(trim_blocks=True)." -requirements: [] -author: Michael DeHaan -''' - -EXAMPLES = ''' -# Example -- win_template: src=/mytemplates/foo.j2 dest=C:\\temp\\file.conf - - -'''
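(A closing note on the win_stat change earlier in this series: the inlined PowerShell produces a lowercase hex MD5 of the file's raw bytes. For comparison, the equivalent digest in Python, offered as a sketch rather than module code:)

    import hashlib

    def file_md5(path, chunk_size=64 * 1024):
        # Lowercase hex MD5 of the file contents, read in chunks so
        # large files are not loaded into memory at once.
        md5 = hashlib.md5()
        with open(path, 'rb') as f:
            for chunk in iter(lambda: f.read(chunk_size), b''):
                md5.update(chunk)
        return md5.hexdigest()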