diff --git a/internal/__init__.py b/cloud/amazon/__init__.py similarity index 100% rename from internal/__init__.py rename to cloud/amazon/__init__.py diff --git a/cloud/cloudformation.py b/cloud/amazon/cloudformation.py similarity index 90% rename from cloud/cloudformation.py rename to cloud/amazon/cloudformation.py index 6a7838a51b2..b382e3f05ff 100644 --- a/cloud/cloudformation.py +++ b/cloud/amazon/cloudformation.py @@ -50,7 +50,7 @@ options: state: description: - If state is "present", stack will be created. If state is "present" and if stack exists and template has changed, it will be updated. - If state is absent, stack will be removed. + If state is "absent", stack will be removed. required: true default: null aliases: [] @@ -60,6 +60,13 @@ options: required: true default: null aliases: [] + stack_policy: + description: + - the path of the CloudFormation stack policy + required: false + default: null + aliases: [] + version_added: "x.x" tags: description: - Dictionary of tags to associate with stack and its resources during stack creation. Cannot be updated later. @@ -97,18 +104,19 @@ EXAMPLES = ''' # Basic task example tasks: - name: launch ansible cloudformation example - action: cloudformation > - stack_name="ansible-cloudformation" state=present - region=us-east-1 disable_rollback=true - template=files/cloudformation-example.json - args: + cloudformation: + stack_name: "ansible-cloudformation" + state: "present" + region: "us-east-1" + disable_rollback: true + template: "files/cloudformation-example.json" template_parameters: - KeyName: jmartin - DiskType: ephemeral - InstanceType: m1.small + KeyName: "jmartin" + DiskType: "ephemeral" + InstanceType: "m1.small" ClusterSize: 3 tags: - Stack: ansible-cloudformation + Stack: "ansible-cloudformation" ''' import json @@ -122,13 +130,6 @@ except ImportError: sys.exit(1) -class Region: - def __init__(self, region): - '''connects boto to the region specified in the cloudformation template''' - self.name = region - self.endpoint = 'cloudformation.%s.amazonaws.com' % region - - def boto_exception(err): '''generic error message handler''' if hasattr(err, 'error_message'): @@ -196,6 +197,7 @@ def main(): template_parameters=dict(required=False, type='dict', default={}), state=dict(default='present', choices=['present', 'absent']), template=dict(default=None, required=True), + stack_policy=dict(default=None, required=False), disable_rollback=dict(default=False, type='bool'), tags=dict(default=None) ) @@ -208,6 +210,10 @@ def main(): state = module.params['state'] stack_name = module.params['stack_name'] template_body = open(module.params['template'], 'r').read() + if module.params['stack_policy'] is not None: + stack_policy_body = open(module.params['stack_policy'], 'r').read() + else: + stack_policy_body = None disable_rollback = module.params['disable_rollback'] template_parameters = module.params['template_parameters'] tags = module.params['tags'] @@ -226,11 +232,10 @@ def main(): stack_outputs = {} try: - cf_region = Region(region) - cfn = boto.cloudformation.connection.CloudFormationConnection( - aws_access_key_id=aws_access_key, + cfn = boto.cloudformation.connect_to_region( + region, + aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_key, - region=cf_region, ) except boto.exception.NoAuthHandlerFound, e: module.fail_json(msg=str(e)) @@ -244,6 +249,7 @@ def main(): try: cfn.create_stack(stack_name, parameters=template_parameters_tup, template_body=template_body, + stack_policy_body=stack_policy_body,
disable_rollback=disable_rollback, capabilities=['CAPABILITY_IAM'], **kwargs) @@ -264,6 +270,7 @@ def main(): try: cfn.update_stack(stack_name, parameters=template_parameters_tup, template_body=template_body, + stack_policy_body=stack_policy_body, disable_rollback=disable_rollback, capabilities=['CAPABILITY_IAM']) operation = 'UPDATE' diff --git a/cloud/ec2.py b/cloud/amazon/ec2.py similarity index 93% rename from cloud/ec2.py rename to cloud/amazon/ec2.py index a4776c74b83..93b496cb5e8 100644 --- a/cloud/ec2.py +++ b/cloud/amazon/ec2.py @@ -17,9 +17,9 @@ DOCUMENTATION = ''' --- module: ec2 -short_description: create, terminate, start or stop an instance in ec2, return instanceid +short_description: create, terminate, start or stop an instance in ec2 description: - - Creates or terminates ec2 instances. When created optionally waits for it to be 'running'. This module has a dependency on python-boto >= 2.5 + - Creates or terminates ec2 instances. version_added: "0.9" options: key_name: @@ -28,12 +28,6 @@ options: required: false default: null aliases: ['keypair'] - id: - description: - - identifier for this instance or set of instances, so that the module will be idempotent with respect to EC2 instances. This identifier is valid for at least 24 hours after the termination of the instance, and should not be reused for another call later on. For details, see the description of client token at U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html). - required: false - default: null - aliases: [] group: description: - security group (or list of groups) to use with the instance @@ -67,6 +61,13 @@ options: required: true default: null aliases: [] + tenancy: + version_added: "1.9" + description: + - An instance with a tenancy of "dedicated" runs on single-tenant hardware and can only be launched into a VPC. Valid values are "default" or "dedicated". Note that to use dedicated tenancy you MUST specify a vpc_subnet_id as well. Dedicated tenancy is not available for EC2 "micro" instances. + required: false + default: default + aliases: [] spot_price: version_added: "1.5" description: @@ -76,7 +77,7 @@ options: aliases: [] image: description: - - I(emi) (or I(ami)) to use for the instance + - I(ami) ID to use for the instance required: true default: null aliases: [] @@ -94,7 +95,7 @@ options: aliases: [] wait: description: - - wait for the instance to be in state 'running' before returning + - wait for the instance to be 'running' before returning. Does not wait for SSH, see 'wait_for' example for details. required: false default: "no" choices: [ "yes", "no" ] @@ -226,54 +227,55 @@ extends_documentation_fragment: aws ''' EXAMPLES = ''' -# Note: None of these examples set aws_access_key, aws_secret_key, or region. -# It is assumed that their matching environment variables are set. +# Note: These examples do not set authentication details, see the AWS Guide for details. 
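Reviewer note on the tenancy option documented above: in boto 2.x this maps straight onto the tenancy keyword of run_instances, and it only works inside a VPC. A minimal sketch under those assumptions (AMI and subnet ids are placeholders, credentials come from the usual boto environment variables):

```python
import boto.ec2

# Credentials are assumed to come from AWS_ACCESS_KEY_ID/AWS_SECRET_ACCESS_KEY.
conn = boto.ec2.connect_to_region("us-east-1")

# Dedicated tenancy has to be paired with a VPC subnet; EC2 rejects it for
# EC2-Classic launches, which is why the docs insist on vpc_subnet_id.
reservation = conn.run_instances(
    "ami-123456",                 # placeholder AMI id
    instance_type="m1.small",
    subnet_id="subnet-29e63245",  # placeholder subnet id
    tenancy="dedicated",
)
print(reservation.instances[0].id)
```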
# Basic provisioning example -- local_action: - module: ec2 +- ec2: key_name: mykey - instance_type: c1.medium - image: emi-40603AD1 + instance_type: t2.micro + image: ami-123456 wait: yes group: webserver count: 3 + vpc_subnet_id: subnet-29e63245 + assign_public_ip: yes # Advanced example with tagging and CloudWatch -- local_action: - module: ec2 +- ec2: key_name: mykey group: databases - instance_type: m1.large - image: ami-6e649707 + instance_type: t2.micro + image: ami-123456 wait: yes wait_timeout: 500 count: 5 instance_tags: db: postgres monitoring: yes + vpc_subnet_id: subnet-29e63245 + assign_public_ip: yes # Single instance with additional IOPS volume from snapshot and volume delete on termination -local_action: - module: ec2 +- ec2: key_name: mykey group: webserver - instance_type: m1.large - image: ami-6e649707 + instance_type: c3.large + image: ami-123456 wait: yes wait_timeout: 500 volumes: - - device_name: /dev/sdb - snapshot: snap-abcdef12 - device_type: io1 - iops: 1000 - volume_size: 100 - delete_on_termination: true + - device_name: /dev/sdb + snapshot: snap-abcdef12 + device_type: io1 + iops: 1000 + volume_size: 100 + delete_on_termination: true monitoring: yes + vpc_subnet_id: subnet-29e63245 + assign_public_ip: yes # Multiple groups example -local_action: - module: ec2 +- ec2: key_name: mykey group: ['databases', 'internal-services', 'sshable', 'and-so-forth'] instance_type: m1.large @@ -284,10 +286,11 @@ local_action: instance_tags: db: postgres monitoring: yes + vpc_subnet_id: subnet-29e63245 + assign_public_ip: yes # Multiple instances with additional volume from snapshot -local_action: - module: ec2 +- ec2: key_name: mykey group: webserver instance_type: m1.large @@ -300,21 +303,23 @@ local_action: snapshot: snap-abcdef12 volume_size: 10 monitoring: yes + vpc_subnet_id: subnet-29e63245 + assign_public_ip: yes -# VPC example +# Dedicated tenancy example - local_action: module: ec2 - key_name: mykey + assign_public_ip: yes group_id: sg-1dc53f72 - instance_type: m1.small + key_name: mykey image: ami-6e649707 - wait: yes + instance_type: m1.small + tenancy: dedicated vpc_subnet_id: subnet-29e63245 - assign_public_ip: yes + wait: yes # Spot instance example -- local_action: - module: ec2 +- ec2: spot_price: 0.24 spot_wait_timeout: 600 keypair: mykey @@ -328,7 +333,6 @@ local_action: # Launch instances, run some tasks # and then terminate them - - name: Create a sandbox instance hosts: localhost gather_facts: False @@ -340,13 +344,21 @@ local_action: region: us-east-1 tasks: - name: Launch instance - local_action: ec2 key_name={{ keypair }} group={{ security_group }} instance_type={{ instance_type }} image={{ image }} wait=true region={{ region }} + ec2: + key_name: "{{ keypair }}" + group: "{{ security_group }}" + instance_type: "{{ instance_type }}" + image: "{{ image }}" + wait: true + region: "{{ region }}" + vpc_subnet_id: subnet-29e63245 + assign_public_ip: yes register: ec2 - name: Add new instance to host group - local_action: add_host hostname={{ item.public_ip }} groupname=launched + add_host: hostname={{ item.public_ip }} groupname=launched with_items: ec2.instances - name: Wait for SSH to come up - local_action: wait_for host={{ item.public_dns_name }} port=22 delay=60 timeout=320 state=started + wait_for: host={{ item.public_dns_name }} port=22 delay=60 timeout=320 state=started with_items: ec2.instances - name: Configure instance(s) @@ -362,8 +374,7 @@ local_action: connection: local tasks: - name: Terminate instances that were previously launched -
local_action: - module: ec2 + ec2: state: 'absent' instance_ids: '{{ ec2.instance_ids }}' @@ -382,12 +393,13 @@ local_action: region: us-east-1 tasks: - name: Start the sandbox instances - local_action: - module: ec2 + ec2: instance_ids: '{{ instance_ids }}' region: '{{ region }}' state: running wait: True + vpc_subnet_id: subnet-29e63245 + assign_public_ip: yes roles: - do_neat_stuff - do_more_neat_stuff @@ -403,39 +415,41 @@ local_action: - 'i-xxxxxx' region: us-east-1 tasks: - - name: Stop the sanbox instances - local_action: - module: ec2 - instance_ids: '{{ instance_ids }}' - region: '{{ region }}' - state: stopped - wait: True + - name: Stop the sandbox instances + ec2: + instance_ids: '{{ instance_ids }}' + region: '{{ region }}' + state: stopped + wait: True + vpc_subnet_id: subnet-29e63245 + assign_public_ip: yes # # Enforce that 5 instances with a tag "foo" are running +# (Highly recommended!) # -- local_action: - module: ec2 +- ec2: key_name: mykey instance_type: c1.medium - image: emi-40603AD1 + image: ami-40603AD1 wait: yes group: webserver instance_tags: foo: bar exact_count: 5 count_tag: foo + vpc_subnet_id: subnet-29e63245 + assign_public_ip: yes # # Enforce that 5 running instances named "database" with a "dbtype" of "postgres" exist # -- local_action: - module: ec2 +- ec2: key_name: mykey instance_type: c1.medium - image: emi-40603AD1 + image: ami-40603AD1 wait: yes group: webserver instance_tags: @@ -445,6 +459,8 @@ local_action: count_tag: Name: database dbtype: postgres + vpc_subnet_id: subnet-29e63245 + assign_public_ip: yes # # count_tag complex argument examples @@ -501,7 +517,7 @@ def _set_none_to_blank(dictionary): result = dictionary for k in result.iterkeys(): if type(result[k]) == dict: - result[k] = _set_non_to_blank(result[k]) + result[k] = _set_none_to_blank(result[k]) elif not result[k]: result[k] = "" return result @@ -585,6 +601,11 @@ def get_instance_info(inst): except AttributeError: instance_info['ebs_optimized'] = False + try: + instance_info['tenancy'] = getattr(inst, 'placement_tenancy') + except AttributeError: + instance_info['tenancy'] = 'default' + return instance_info def boto_supports_associate_public_ip_address(ec2): @@ -660,6 +681,11 @@ def enforce_count(module, ec2): count_tag = module.params.get('count_tag') zone = module.params.get('zone') + # fail here if the exact count was specified without filtering + # on a tag, as this may lead to an undesired removal of instances + if exact_count and count_tag is None: + module.fail_json(msg="you must use the 'count_tag' option with exact_count") + reservations, instances = find_running_instances_by_count_tag(module, ec2, count_tag, zone) changed = None @@ -723,6 +749,7 @@ def create_instances(module, ec2, override_count=None): group_id = module.params.get('group_id') zone = module.params.get('zone') instance_type = module.params.get('instance_type') + tenancy = module.params.get('tenancy') spot_price = module.params.get('spot_price') image = module.params.get('image') if override_count: @@ -806,6 +833,9 @@ def create_instances(module, ec2, override_count=None): if ebs_optimized: params['ebs_optimized'] = ebs_optimized + + if tenancy: + params['tenancy'] = tenancy if boto_supports_profile_name_arg(ec2): params['instance_profile_name'] = instance_profile_name @@ -1148,6 +1178,7 @@ def main(): count_tag = dict(), volumes = dict(type='list'), ebs_optimized = dict(type='bool', default=False), + tenancy = dict(default='default'), ) ) diff --git a/cloud/ec2_ami.py b/cloud/amazon/ec2_ami.py similarity index 93%
rename from cloud/ec2_ami.py rename to cloud/amazon/ec2_ami.py index 3baf70a438f..ab1f986356b 100644 --- a/cloud/ec2_ami.py +++ b/cloud/amazon/ec2_ami.py @@ -18,9 +18,9 @@ DOCUMENTATION = ''' --- module: ec2_ami version_added: "1.3" -short_description: create or destroy an image in ec2, return imageid +short_description: create or destroy an image in ec2 description: - - Creates or deletes ec2 images. This module has a dependency on python-boto >= 2.5 + - Creates or deletes ec2 images. options: instance_id: description: @@ -79,7 +79,7 @@ options: aliases: [] delete_snapshot: description: - - Whether or not to deleted an AMI while deregistering it. + - Whether or not to delete an AMI while deregistering it. required: false default: null aliases: [] @@ -89,13 +89,10 @@ extends_documentation_fragment: aws ''' # Thank you to iAcquire for sponsoring development of this module. -# -# See http://alestic.com/2011/06/ec2-ami-security for more information about ensuring the security of your AMI. EXAMPLES = ''' # Basic AMI Creation -- local_action: - module: ec2_ami +- ec2_ami: aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx instance_id: i-xxxxxx @@ -104,8 +101,7 @@ EXAMPLES = ''' register: instance # Basic AMI Creation, without waiting -- local_action: - module: ec2_ami +- ec2_ami: aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx region: xxxxxx @@ -115,22 +111,20 @@ EXAMPLES = ''' register: instance # Deregister/Delete AMI -- local_action: - module: ec2_ami +- ec2_ami: aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx region: xxxxxx - image_id: ${instance.image_id} + image_id: "{{ instance.image_id }}" delete_snapshot: True state: absent # Deregister AMI -- local_action: - module: ec2_ami +- ec2_ami: aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx region: xxxxxx - image_id: ${instance.image_id} + image_id: "{{ instance.image_id }}" delete_snapshot: False state: absent diff --git a/cloud/ec2_ami_search.py b/cloud/amazon/ec2_ami_search.py similarity index 95% rename from cloud/ec2_ami_search.py rename to cloud/amazon/ec2_ami_search.py index 25875de39bd..1dd5f056e96 100644 --- a/cloud/ec2_ami_search.py +++ b/cloud/amazon/ec2_ami_search.py @@ -16,10 +16,11 @@ # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. + DOCUMENTATION = ''' --- module: ec2_ami_search -short_description: Retrieve AWS AMI for a given operating system. +short_description: Retrieve AWS AMI information for a given operating system. version_added: "1.6" description: - Look up the most recent AMI on AWS for a given operating system.
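Reviewer note: the lookup this module performs amounts to filtering a tab-separated release table published by the distro. A rough standalone sketch; the URL layout and column handling here are illustrative assumptions, not the module's exact code:

```python
import csv
import urllib2  # the modules in this diff target Python 2

# Hypothetical index URL; Canonical publishes one tab-separated row per AMI.
URL = "https://uec-images.ubuntu.com/query/trusty/server/released.current.txt"

def find_ami(rows, region="us-east-1", store="ebs", virt="paravirtual"):
    # Keep the first row matching region, root store and virtualization
    # type, then pull out whichever column looks like an AMI id.
    for row in csv.reader(rows, delimiter="\t"):
        if region in row and store in row and virt in row:
            return next(col for col in row if col.startswith("ami-"))
    return None

print(find_ami(urllib2.urlopen(URL)))
```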
@@ -56,7 +57,8 @@ options: required: false default: us-east-1 choices: ["ap-northeast-1", "ap-southeast-1", "ap-southeast-2", - "eu-west-1", "sa-east-1", "us-east-1", "us-west-1", "us-west-2", "us-gov-west-1"] + "eu-central-1", "eu-west-1", "sa-east-1", "us-east-1", + "us-west-1", "us-west-2", "us-gov-west-1"] virt: description: virtualization type required: false @@ -88,11 +90,13 @@ SUPPORTED_DISTROS = ['ubuntu'] AWS_REGIONS = ['ap-northeast-1', 'ap-southeast-1', 'ap-southeast-2', + 'eu-central-1', 'eu-west-1', 'sa-east-1', 'us-east-1', 'us-west-1', - 'us-west-2'] + 'us-west-2', + 'us-gov-west-1'] def get_url(module, url): diff --git a/cloud/ec2_asg.py b/cloud/amazon/ec2_asg.py old mode 100755 new mode 100644 similarity index 92% rename from cloud/ec2_asg.py rename to cloud/amazon/ec2_asg.py index 3fc033e6d65..6e5d3508cb8 --- a/cloud/ec2_asg.py +++ b/cloud/amazon/ec2_asg.py @@ -119,21 +119,23 @@ extends_documentation_fragment: aws """ EXAMPLES = ''' -A basic example of configuration: +# Basic configuration - ec2_asg: name: special - load_balancers: 'lb1,lb2' - availability_zones: 'eu-west-1a,eu-west-1b' + load_balancers: [ 'lb1', 'lb2' ] + availability_zones: [ 'eu-west-1a', 'eu-west-1b' ] launch_config_name: 'lc-1' min_size: 1 max_size: 10 desired_capacity: 5 - vpc_zone_identifier: 'subnet-abcd1234,subnet-1a2b3c4d' + vpc_zone_identifier: [ 'subnet-abcd1234', 'subnet-1a2b3c4d' ] tags: - environment: production propagate_at_launch: no +# Rolling ASG Updates + Below is an example of how to assign a new launch config to an ASG and terminate old instances. All instances in "myasg" that do not have the launch configuration named "my_new_lc" will be terminated in @@ -199,7 +201,7 @@ except ImportError: ASG_ATTRIBUTES = ('availability_zones', 'default_cooldown', 'desired_capacity', 'health_check_period', 'health_check_type', 'launch_config_name', 'load_balancers', 'max_size', 'min_size', 'name', 'placement_group', - 'tags', 'termination_policies', 'vpc_zone_identifier') + 'termination_policies', 'vpc_zone_identifier') INSTANCE_ATTRIBUTES = ('instance_id', 'health_status', 'lifecycle_state', 'launch_config_name') @@ -245,6 +247,10 @@ def get_properties(autoscaling_group): properties['pending_instances'] += 1 properties['instance_facts'] = instance_facts properties['load_balancers'] = autoscaling_group.load_balancers + + if getattr(autoscaling_group, "tags", None): + properties['tags'] = dict((t.key, t.value) for t in autoscaling_group.tags) + return properties @@ -268,8 +274,10 @@ def create_autoscaling_group(connection, module): region, ec2_url, aws_connect_params = get_aws_connection_info(module) try: ec2_connection = connect_to_aws(boto.ec2, region, **aws_connect_params) - except boto.exception.NoAuthHandlerFound, e: + except (boto.exception.NoAuthHandlerFound, StandardError), e: module.fail_json(msg=str(e)) + elif vpc_zone_identifier: + vpc_zone_identifier = ','.join(vpc_zone_identifier) asg_tags = [] for tag in set_tags: @@ -318,6 +326,8 @@ def create_autoscaling_group(connection, module): for attr in ASG_ATTRIBUTES: if module.params.get(attr): module_attr = module.params.get(attr) + if attr == 'vpc_zone_identifier': + module_attr = ','.join(module_attr) group_attr = getattr(as_group, attr) # we do this because AWS and the module may return the same list # sorted differently @@ -357,6 +367,7 @@ def create_autoscaling_group(connection, module): continue if changed: connection.create_or_update_tags(asg_tags) + as_group.tags = asg_tags # handle loadbalancers separately because None != []
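Reviewer note between hunks: the "sorted differently" comparison above is the heart of the idempotency check. Distilled into a self-contained sketch (the helper name is ours, not the module's):

```python
# AWS may echo list-valued ASG attributes (availability_zones,
# load_balancers, ...) in a different order than the playbook supplied
# them, so both sides are sorted before declaring the group changed.
def attr_changed(module_attr, group_attr):
    if isinstance(module_attr, list) and isinstance(group_attr, list):
        return sorted(module_attr) != sorted(group_attr)
    return module_attr != group_attr

assert not attr_changed(['eu-west-1a', 'eu-west-1b'],
                        ['eu-west-1b', 'eu-west-1a'])
assert attr_changed(['subnet-abcd1234'],
                    ['subnet-abcd1234', 'subnet-1a2b3c4d'])
```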
load_balancers = module.params.get('load_balancers') or [] @@ -373,26 +384,6 @@ def create_autoscaling_group(connection, module): module.fail_json(msg=str(e)) - result = as_groups[0] - module.exit_json(changed=changed, name=result.name, - autoscaling_group_arn=result.autoscaling_group_arn, - availability_zones=result.availability_zones, - created_time=str(result.created_time), - default_cooldown=result.default_cooldown, - health_check_period=result.health_check_period, - health_check_type=result.health_check_type, - instance_id=result.instance_id, - instances=[instance.instance_id for instance in result.instances], - launch_config_name=result.launch_config_name, - load_balancers=result.load_balancers, - min_size=result.min_size, max_size=result.max_size, - placement_group=result.placement_group, - wait_timeout = dict(default=300), - tags=result.tags, - termination_policies=result.termination_policies, - vpc_zone_identifier=result.vpc_zone_identifier) - - def delete_autoscaling_group(connection, module): group_name = module.params.get('name') groups = connection.get_all_groups(names=[group_name]) @@ -426,13 +417,14 @@ def replace(connection, module): batch_size = module.params.get('replace_batch_size') wait_timeout = module.params.get('wait_timeout') - group_name = module.params.get('group_name') + group_name = module.params.get('name') max_size = module.params.get('max_size') min_size = module.params.get('min_size') desired_capacity = module.params.get('desired_capacity') + + # FIXME: we need some more docs about this feature replace_instances = module.params.get('replace_instances') - # wait for instance list to be populated on a newly provisioned ASG instance_wait = time.time() + 30 while instance_wait > time.time(): @@ -444,7 +436,7 @@ def replace(connection, module): time.sleep(10) if instance_wait <= time.time(): # waiting took too long - module.fail_json(msg = "Waited too for instances to appear. %s" % time.asctime()) + module.fail_json(msg = "Waited too long for instances to appear. %s" % time.asctime()) # determine if we need to continue replaceable = 0 if replace_instances: @@ -470,7 +462,7 @@ def replace(connection, module): props = get_properties(as_group) if wait_timeout <= time.time(): # waiting took too long - module.fail_json(msg = "Waited too for instances to appear. %s" % time.asctime()) + module.fail_json(msg = "Waited too long for instances to appear. 
%s" % time.asctime()) instances = props['instances'] if replace_instances: instances = replace_instances @@ -490,7 +482,7 @@ def replace(connection, module): def replace_batch(connection, module, replace_instances): - group_name = module.params.get('group_name') + group_name = module.params.get('name') wait_timeout = int(module.params.get('wait_timeout')) lc_check = module.params.get('lc_check') @@ -567,7 +559,7 @@ def main(): min_size=dict(type='int'), max_size=dict(type='int'), desired_capacity=dict(type='int'), - vpc_zone_identifier=dict(type='str'), + vpc_zone_identifier=dict(type='list'), replace_batch_size=dict(type='int', default=1), replace_all_instances=dict(type='bool', default=False), replace_instances=dict(type='list', default=[]), @@ -577,9 +569,13 @@ def main(): tags=dict(type='list', default=[]), health_check_period=dict(type='int', default=300), health_check_type=dict(default='EC2', choices=['EC2', 'ELB']), - ) + ), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive = [['replace_all_instances', 'replace_instances']] ) - module = AnsibleModule(argument_spec=argument_spec) state = module.params.get('state') replace_instances = module.params.get('replace_instances') @@ -591,16 +587,16 @@ def main(): module.fail_json(msg="failed to connect to AWS for the given region: %s" % str(region)) except boto.exception.NoAuthHandlerFound, e: module.fail_json(msg=str(e)) - changed = False - if replace_all_instances and replace_instances: - module.fail_json(msg="You can't use replace_instances and replace_all_instances in the same task.") + changed = create_changed = replace_changed = False + + if state == 'present': create_changed, asg_properties=create_autoscaling_group(connection, module) - if replace_all_instances or replace_instances: - replace_changed, asg_properties=replace(connection, module) elif state == 'absent': changed = delete_autoscaling_group(connection, module) module.exit_json( changed = changed ) + if replace_all_instances or replace_instances: + replace_changed, asg_properties=replace(connection, module) if create_changed or replace_changed: changed = True module.exit_json( changed = changed, **asg_properties ) diff --git a/cloud/ec2_eip.py b/cloud/amazon/ec2_eip.py similarity index 99% rename from cloud/ec2_eip.py rename to cloud/amazon/ec2_eip.py index cff83e482b3..fd0e8d04568 100644 --- a/cloud/ec2_eip.py +++ b/cloud/amazon/ec2_eip.py @@ -69,13 +69,13 @@ EXAMPLES = ''' ec2_eip: instance_id=i-1212f003 - name: allocate a new elastic IP without associating it to anything - ec2_eip: + action: ec2_eip register: eip - name: output the IP debug: msg="Allocated IP is {{ eip.public_ip }}" - name: provision new instances with ec2 - ec2: keypair=mykey instance_type=c1.medium image=emi-40603AD1 wait=yes group=webserver count=3 + ec2: keypair=mykey instance_type=c1.medium image=ami-40603AD1 wait=yes group=webserver count=3 register: ec2 - name: associate new elastic IPs with each of the instances ec2_eip: "instance_id={{ item }}" diff --git a/cloud/ec2_elb.py b/cloud/amazon/ec2_elb.py similarity index 98% rename from cloud/ec2_elb.py rename to cloud/amazon/ec2_elb.py index 42cb1819025..11abd827b2b 100644 --- a/cloud/ec2_elb.py +++ b/cloud/amazon/ec2_elb.py @@ -80,18 +80,18 @@ EXAMPLES = """ # basic pre_task and post_task example pre_tasks: - name: Gathering ec2 facts - ec2_facts: + action: ec2_facts - name: Instance De-register - local_action: ec2_elb - args: + local_action: + module: ec2_elb instance_id: "{{ ansible_ec2_instance_id }}" state: 
'absent' roles: - myrole post_tasks: - name: Instance Register - local_action: ec2_elb - args: + local_action: + module: ec2_elb instance_id: "{{ ansible_ec2_instance_id }}" ec2_elbs: "{{ item }}" state: 'present' @@ -258,7 +258,7 @@ class ElbManager: try: elb = connect_to_aws(boto.ec2.elb, self.region, **self.aws_connect_params) - except boto.exception.NoAuthHandlerFound, e: + except (boto.exception.NoAuthHandlerFound, StandardError), e: self.module.fail_json(msg=str(e)) elbs = elb.get_all_load_balancers() @@ -278,7 +278,7 @@ class ElbManager: try: ec2 = connect_to_aws(boto.ec2, self.region, **self.aws_connect_params) - except boto.exception.NoAuthHandlerFound, e: + except (boto.exception.NoAuthHandlerFound, StandardError), e: self.module.fail_json(msg=str(e)) return ec2.get_only_instances(instance_ids=[self.instance_id])[0] diff --git a/cloud/ec2_elb_lb.py b/cloud/amazon/ec2_elb_lb.py similarity index 99% rename from cloud/ec2_elb_lb.py rename to cloud/amazon/ec2_elb_lb.py index 462fbbcc797..d83db113963 100644 --- a/cloud/ec2_elb_lb.py +++ b/cloud/amazon/ec2_elb_lb.py @@ -115,7 +115,8 @@ EXAMPLES = """ # Note: None of these examples set aws_access_key, aws_secret_key, or region. # It is assumed that their matching environment variables are set. -# Basic provisioning example +# Basic provisioning example (non-VPC) + - local_action: module: ec2_elb_lb name: "test-please-delete" @@ -134,8 +135,8 @@ EXAMPLES = """ # ssl certificate required for https or ssl ssl_certificate_id: "arn:aws:iam::123456789012:server-certificate/company/servercerts/ProdServerCert" +# Internal ELB example -# Basic VPC provisioning example - local_action: module: ec2_elb_lb name: "test-vpc" @@ -214,7 +215,7 @@ EXAMPLES = """ name: 'New ELB' security_group_ids: 'sg-123456, sg-67890' region: us-west-2 - subnets: 'subnet-123456, subnet-67890' + subnets: 'subnet-123456,subnet-67890' purge_subnets: yes listeners: - protocol: http @@ -374,7 +375,7 @@ class ElbManager(object): try: return connect_to_aws(boto.ec2.elb, self.region, **self.aws_connect_params) - except boto.exception.NoAuthHandlerFound, e: + except (boto.exception.NoAuthHandlerFound, StandardError), e: self.module.fail_json(msg=str(e)) def _delete_elb(self): diff --git a/cloud/ec2_facts.py b/cloud/amazon/ec2_facts.py similarity index 98% rename from cloud/ec2_facts.py rename to cloud/amazon/ec2_facts.py index 7b5c610dc2d..cf2a90aabc5 100644 --- a/cloud/ec2_facts.py +++ b/cloud/amazon/ec2_facts.py @@ -34,8 +34,6 @@ description: - This module fetches data from the metadata servers in ec2 (aws) as per http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html. The module must be called from within the EC2 instance itself. - Eucalyptus cloud provides a similar service and this module should - work with this cloud provider as well. notes: - Parameters to filter on ec2_facts may be added later. 
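Reviewer note: ec2_facts works by walking the link-local metadata service from inside the instance; the address and path layout below are the EC2 conventions the module documentation links to, and the two lookups are just examples:

```python
import urllib2

METADATA = "http://169.254.169.254/latest/meta-data/"

def metadata(path=""):
    # Short timeout: hosts outside EC2 cannot reach the link-local address,
    # which is why the module has to run on the instance itself.
    return urllib2.urlopen(METADATA + path, timeout=2).read()

print(metadata("instance-id"))                  # e.g. i-1234567890abcdef0
print(metadata("placement/availability-zone"))  # e.g. us-east-1a
```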
author: "Silviu Dicu " @@ -65,6 +63,7 @@ class Ec2Metadata(object): AWS_REGIONS = ('ap-northeast-1', 'ap-southeast-1', 'ap-southeast-2', + 'eu-central-1', 'eu-west-1', 'sa-east-1', 'us-east-1', diff --git a/cloud/ec2_group.py b/cloud/amazon/ec2_group.py similarity index 93% rename from cloud/ec2_group.py rename to cloud/amazon/ec2_group.py index 1c8aa701015..59623e96d64 100644 --- a/cloud/ec2_group.py +++ b/cloud/amazon/ec2_group.py @@ -55,7 +55,7 @@ options: purge_rules_egress: version_added: "1.8" description: - - Purge existing rules_egree on security group that are not found in rules_egress + - Purge existing rules_egress on security group that are not found in rules_egress required: false default: 'true' aliases: [] @@ -70,8 +70,7 @@ notes: EXAMPLES = ''' - name: example ec2 group - local_action: - module: ec2_group + ec2_group: name: example description: an example EC2 group vpc_id: 12345 @@ -102,6 +101,7 @@ EXAMPLES = ''' - proto: tcp from_port: 80 to_port: 80 + cidr_ip: 0.0.0.0/0 group_name: example-other # description to use if example-other needs to be created group_desc: other example EC2 group @@ -114,11 +114,21 @@ except ImportError: sys.exit(1) +def make_rule_key(prefix, rule, group_id, cidr_ip): + """Creates a unique key for an individual group rule""" + if isinstance(rule, dict): + proto, from_port, to_port = [rule.get(x, None) for x in ('proto', 'from_port', 'to_port')] + else: # isinstance boto.ec2.securitygroup.IPPermissions + proto, from_port, to_port = [getattr(rule, x, None) for x in ('ip_protocol', 'from_port', 'to_port')] + + key = "%s-%s-%s-%s-%s-%s" % (prefix, proto, from_port, to_port, group_id, cidr_ip) + return key.lower().replace('-none', '-None') + + def addRulesToLookup(rules, prefix, dict): for rule in rules: for grant in rule.grants: - dict["%s-%s-%s-%s-%s-%s" % (prefix, rule.ip_protocol, rule.from_port, rule.to_port, - grant.group_id, grant.cidr_ip)] = rule + dict[make_rule_key(prefix, rule, grant.group_id, grant.cidr_ip)] = rule def get_target_from_rule(module, ec2, rule, name, group, groups, vpc_id): @@ -279,7 +289,7 @@ def main(): rule['to_port'] = None # If rule already exists, don't later delete it - ruleId = "%s-%s-%s-%s-%s-%s" % ('in', rule['proto'], rule['from_port'], rule['to_port'], group_id, ip) + ruleId = make_rule_key('in', rule, group_id, ip) if ruleId in groupRules: del groupRules[ruleId] # Otherwise, add new rule @@ -320,7 +330,7 @@ def main(): rule['to_port'] = None # If rule already exists, don't later delete it - ruleId = "%s-%s-%s-%s-%s-%s" % ('out', rule['proto'], rule['from_port'], rule['to_port'], group_id, ip) + ruleId = make_rule_key('out', rule, group_id, ip) if ruleId in groupRules: del groupRules[ruleId] # Otherwise, add new rule @@ -339,7 +349,7 @@ def main(): cidr_ip=ip) changed = True elif vpc_id and not module.check_mode: - # when using a vpc, but no egress rules are specified, + # when using a vpc, but no egress rules are specified, # we add in a default allow all out rule, which was the # default behavior before egress rules were added default_egress_rule = 'out--1-None-None-None-0.0.0.0/0' diff --git a/cloud/ec2_key.py b/cloud/amazon/ec2_key.py similarity index 97% rename from cloud/ec2_key.py rename to cloud/amazon/ec2_key.py index 9c8274f764a..9f548496c4a 100644 --- a/cloud/ec2_key.py +++ b/cloud/amazon/ec2_key.py @@ -56,15 +56,13 @@ EXAMPLES = ''' # Creates a new ec2 key pair named `example` if not present, returns generated # private key - name: example ec2 key - local_action: - module: ec2_key + ec2_key: name: 
example # Creates a new ec2 key pair named `example` if not present using provided key -# material +# material. This could use the 'file' lookup plugin to pull this off disk. - name: example2 ec2 key - local_action: - module: ec2_key + ec2_key: name: example2 key_material: 'ssh-rsa AAAAxyz...== me@example.com' state: present @@ -72,16 +70,14 @@ EXAMPLES = ''' # Creates a new ec2 key pair named `example` if not present using provided key # material - name: example3 ec2 key - local_action: - module: ec2_key + ec2_key: name: example3 key_material: "{{ item }}" with_file: /path/to/public_key.id_rsa.pub # Removes ec2 key pair by name - name: remove example key - local_action: - module: ec2_key + ec2_key: name: example state: absent ''' diff --git a/cloud/ec2_lc.py b/cloud/amazon/ec2_lc.py old mode 100755 new mode 100644 similarity index 98% rename from cloud/ec2_lc.py rename to cloud/amazon/ec2_lc.py index f75dfe6d938..30f532c9e4f --- a/cloud/ec2_lc.py +++ b/cloud/amazon/ec2_lc.py @@ -93,7 +93,6 @@ options: description: - Used for Auto Scaling groups that launch instances into an Amazon Virtual Private Cloud. Specifies whether to assign a public IP address to each instance launched in a Amazon VPC. required: false - default: false aliases: [] version_added: "1.8" ramdisk_id: @@ -125,7 +124,7 @@ EXAMPLES = ''' name: special image_id: ami-XXX key_name: default - security_groups: 'group,group2' + security_groups: ['group', 'group2' ] instance_type: t1.micro ''' @@ -255,7 +254,7 @@ def main(): ebs_optimized=dict(default=False, type='bool'), associate_public_ip_address=dict(type='bool'), instance_monitoring=dict(default=False, type='bool'), - assign_public_ip=dict(default=False, type='bool') + assign_public_ip=dict(type='bool') ) ) @@ -265,7 +264,7 @@ def main(): try: connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params) - except boto.exception.NoAuthHandlerFound, e: + except (boto.exception.NoAuthHandlerFound, StandardError), e: module.fail_json(msg=str(e)) state = module.params.get('state') diff --git a/cloud/ec2_metric_alarm.py b/cloud/amazon/ec2_metric_alarm.py similarity index 99% rename from cloud/ec2_metric_alarm.py rename to cloud/amazon/ec2_metric_alarm.py index 519f88f24f8..7a8d573ce74 100644 --- a/cloud/ec2_metric_alarm.py +++ b/cloud/amazon/ec2_metric_alarm.py @@ -271,7 +271,7 @@ def main(): region, ec2_url, aws_connect_params = get_aws_connection_info(module) try: connection = connect_to_aws(boto.ec2.cloudwatch, region, **aws_connect_params) - except boto.exception.NoAuthHandlerFound, e: + except (boto.exception.NoAuthHandlerFound, StandardError), e: module.fail_json(msg=str(e)) if state == 'present': diff --git a/cloud/ec2_scaling_policy.py b/cloud/amazon/ec2_scaling_policy.py old mode 100755 new mode 100644 similarity index 97% rename from cloud/ec2_scaling_policy.py rename to cloud/amazon/ec2_scaling_policy.py index ad1fa7ce7f1..8e7d459e3e3 --- a/cloud/ec2_scaling_policy.py +++ b/cloud/amazon/ec2_scaling_policy.py @@ -163,9 +163,7 @@ def main(): try: connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params) - if not connection: - module.fail_json(msg="failed to connect to AWS for the given region: %s" % str(region)) - except boto.exception.NoAuthHandlerFound, e: + except (boto.exception.NoAuthHandlerFound, StandardError), e: module.fail_json(msg = str(e)) if state == 'present': diff --git a/cloud/ec2_snapshot.py b/cloud/amazon/ec2_snapshot.py similarity index 73% rename from cloud/ec2_snapshot.py rename to cloud/amazon/ec2_snapshot.py index 
a37aadb95e2..20cb017d81c 100644 --- a/cloud/ec2_snapshot.py +++ b/cloud/amazon/ec2_snapshot.py @@ -48,6 +48,32 @@ options: - a hash/dictionary of tags to add to the snapshot required: false version_added: "1.6" + wait: + description: + - wait for the snapshot to be ready + choices: ['yes', 'no'] + required: false + default: yes + version_added: "1.5.1" + wait_timeout: + description: + - how long before wait gives up, in seconds + - specify 0 to wait forever + required: false + default: 0 + version_added: "1.5.1" + state: + description: + - whether to create or delete a snapshot + required: false + default: present + choices: ['absent', 'present'] + version_added: "1.9" + snapshot_id: + description: + - snapshot id to remove + required: false + version_added: "1.9" author: Will Thames extends_documentation_fragment: aws @@ -55,26 +81,29 @@ extends_documentation_fragment: aws EXAMPLES = ''' # Simple snapshot of volume using volume_id -- local_action: - module: ec2_snapshot +- ec2_snapshot: volume_id: vol-abcdef12 description: snapshot of /data from DB123 taken 2013/11/28 12:18:32 # Snapshot of volume mounted on device_name attached to instance_id -- local_action: - module: ec2_snapshot +- ec2_snapshot: instance_id: i-12345678 device_name: /dev/sdb1 description: snapshot of /data from DB123 taken 2013/11/28 12:18:32 # Snapshot of volume with tagging -- local_action: - module: ec2_snapshot +- ec2_snapshot: instance_id: i-12345678 device_name: /dev/sdb1 snapshot_tags: frequency: hourly source: /data + +# Remove a snapshot +- local_action: + module: ec2_snapshot + snapshot_id: snap-abcd1234 + state: absent ''' import sys @@ -93,24 +122,28 @@ def main(): volume_id = dict(), description = dict(), instance_id = dict(), + snapshot_id = dict(), device_name = dict(), wait = dict(type='bool', default='true'), wait_timeout = dict(default=0), snapshot_tags = dict(type='dict', default=dict()), + state = dict(choices=['absent','present'], default='present'), ) ) module = AnsibleModule(argument_spec=argument_spec) volume_id = module.params.get('volume_id') + snapshot_id = module.params.get('snapshot_id') description = module.params.get('description') instance_id = module.params.get('instance_id') device_name = module.params.get('device_name') wait = module.params.get('wait') wait_timeout = module.params.get('wait_timeout') snapshot_tags = module.params.get('snapshot_tags') + state = module.params.get('state') - if not volume_id and not instance_id or volume_id and instance_id: - module.fail_json('One and only one of volume_id or instance_id must be specified') + if len(filter(None, [volume_id, instance_id, snapshot_id])) != 1: + module.fail_json(msg='One and only one of volume_id, instance_id or snapshot_id must be specified') if instance_id and not device_name or device_name and not instance_id: module.fail_json(msg='Instance ID and device name must both be specified') @@ -125,6 +158,20 @@ def main(): except boto.exception.BotoServerError, e: module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) + if state == 'absent': + if not snapshot_id: + module.fail_json(msg = 'snapshot_id must be set when state is absent') + try: + snapshots = ec2.get_all_snapshots([snapshot_id]) + ec2.delete_snapshot(snapshot_id) + module.exit_json(changed=True) + except boto.exception.BotoServerError, e: + # exception is raised if snapshot does not exist + if e.error_code == 'InvalidSnapshot.NotFound': + module.exit_json(changed=False) + else: + module.fail_json(msg = "%s: %s" %
(e.error_code, e.error_message)) + try: snapshot = ec2.create_snapshot(volume_id, description=description) time_waited = 0 diff --git a/cloud/ec2_tag.py b/cloud/amazon/ec2_tag.py similarity index 87% rename from cloud/ec2_tag.py rename to cloud/amazon/ec2_tag.py index 4a33112189a..409041f906b 100644 --- a/cloud/ec2_tag.py +++ b/cloud/amazon/ec2_tag.py @@ -50,7 +50,7 @@ EXAMPLES = ''' # Basic example of adding tag(s) tasks: - name: tag a resource - local_action: ec2_tag resource=vol-XXXXXX region=eu-west-1 state=present + ec2_tag: resource=vol-XXXXXX region=eu-west-1 state=present args: tags: Name: ubervol @@ -59,11 +59,11 @@ tasks: # Playbook example of adding tag(s) to spawned instances tasks: - name: launch some instances - local_action: ec2 keypair={{ keypair }} group={{ security_group }} instance_type={{ instance_type }} image={{ image_id }} wait=true region=eu-west-1 + ec2: keypair={{ keypair }} group={{ security_group }} instance_type={{ instance_type }} image={{ image_id }} wait=true region=eu-west-1 register: ec2 - name: tag my launched instances - local_action: ec2_tag resource={{ item.id }} region=eu-west-1 state=present + ec2_tag: resource={{ item.id }} region=eu-west-1 state=present with_items: ec2.instances args: tags: @@ -71,11 +71,6 @@ tasks: env: prod ''' -# Note: this module needs to be made idempotent. Possible solution is to use resource tags with the volumes. -# if state=present and it doesn't exist, create, tag and attach. -# Check for state by looking for volume attachment with tag (and against block device mapping?). -# Would personally like to revisit this in May when Eucalyptus also has tagging support (3.3). - import sys import time diff --git a/cloud/ec2_vol.py b/cloud/amazon/ec2_vol.py similarity index 91% rename from cloud/ec2_vol.py rename to cloud/amazon/ec2_vol.py index 0e662a77bdd..7fd58fa5348 100644 --- a/cloud/ec2_vol.py +++ b/cloud/amazon/ec2_vol.py @@ -48,6 +48,14 @@ options: required: false default: null aliases: [] + volume_type: + description: + - Type of EBS volume; standard (magnetic), gp2 (SSD), io1 (Provisioned IOPS). "Standard" is the old EBS default + and remains the Ansible default for backwards compatibility. + required: false + default: standard + aliases: [] + version_added: "1.9" iops: description: - the provisioned IOPs you want to associate with this volume (integer). @@ -105,36 +113,31 @@ extends_documentation_fragment: aws EXAMPLES = ''' # Simple attachment action -- local_action: - module: ec2_vol +- ec2_vol: instance: XXXXXX volume_size: 5 device_name: sdd # Example using custom iops params -- local_action: - module: ec2_vol +- ec2_vol: instance: XXXXXX volume_size: 5 iops: 200 device_name: sdd # Example using snapshot id -- local_action: - module: ec2_vol +- ec2_vol: instance: XXXXXX snapshot: "{{ snapshot }}" # Playbook example combined with instance launch -- local_action: - module: ec2 +- ec2: keypair: "{{ keypair }}" image: "{{ image }}" wait: yes count: 3 register: ec2 -- local_action: - module: ec2_vol +- ec2_vol: instance: "{{ item.id }}" volume_size: 5 with_items: ec2.instances # Example: Launch an instance and then add a volume if not already attached # * Volume will be created with the given name if not already created. # * Nothing will happen if the volume is already attached. # * Volume must exist in the same zone.
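Reviewer note before the launch example the comments above describe: the interplay between the new volume_type option and iops in create_volume() further down reduces to one rule, sketched here with a helper name of our own:

```python
def effective_volume_type(volume_type, iops):
    # Mirrors create_volume(): 'io1' is implied whenever iops is given,
    # otherwise the user's choice (default 'standard') is preserved.
    if iops:
        return 'io1'
    return volume_type

assert effective_volume_type('standard', None) == 'standard'
assert effective_volume_type('gp2', None) == 'gp2'
assert effective_volume_type('standard', 200) == 'io1'
```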
-- local_action: - module: ec2 +- ec2: keypair: "{{ keypair }}" image: "{{ image }}" zone: YYYYYY @@ -154,8 +156,7 @@ EXAMPLES = ''' count: 1 register: ec2 -- local_action: - module: ec2_vol +- ec2_vol: instance: "{{ item.id }}" name: my_existing_volume_Name_tag device_name: /dev/xvdf @@ -163,23 +164,28 @@ EXAMPLES = ''' register: ec2_vol # Remove a volume -- local_action: - module: ec2_vol +- ec2_vol: id: vol-XXXXXXXX state: absent +# Detach a volume +- ec2_vol: + id: vol-XXXXXXXX + instance: None + # List volumes for an instance -- local_action: - module: ec2_vol +- ec2_vol: instance: i-XXXXXX state: list + +# Create new volume using SSD storage +- ec2_vol: + instance: XXXXXX + volume_size: 50 + volume_type: gp2 + device_name: /dev/xvdf ''' -# Note: this module needs to be made idempotent. Possible solution is to use resource tags with the volumes. -# if state=present and it doesn't exist, create, tag and attach. -# Check for state by looking for volume attachment with tag (and against block device mapping?). -# Would personally like to revisit this in May when Eucalyptus also has tagging support (3.3). - import sys import time @@ -253,22 +259,24 @@ def create_volume(module, ec2, zone): iops = module.params.get('iops') encrypted = module.params.get('encrypted') volume_size = module.params.get('volume_size') + volume_type = module.params.get('volume_type') snapshot = module.params.get('snapshot') # If custom iops is defined we use volume_type "io1" rather than the default of "standard" if iops: volume_type = 'io1' - else: - volume_type = 'standard' + + if instance == 'None' or instance == '': + instance = None # If no instance supplied, try volume creation based on module parameters. if name or id: - if not instance: - module.fail_json(msg = "If name or id is specified, instance must also be specified") if iops or volume_size: module.fail_json(msg = "Parameters are not compatible: [id or name] and [iops or volume_size]") volume = get_volume(module, ec2) if volume.attachment_state() is not None: + if instance is None: + return volume adata = volume.attach_data if adata.instance_id != instance: module.fail_json(msg = "Volume %s is already attached to another instance: %s" @@ -330,6 +338,13 @@ def attach_volume(module, ec2, volume, instance): except boto.exception.BotoServerError, e: module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message)) +def detach_volume(module, ec2): + vol = get_volume(module, ec2) + if not vol or vol.attachment_state() is None: + module.exit_json(changed=False) + else: + vol.detach() + module.exit_json(changed=True) def main(): argument_spec = ec2_argument_spec() @@ -338,6 +353,7 @@ def main(): id = dict(), name = dict(), volume_size = dict(), + volume_type = dict(choices=['standard', 'gp2', 'io1'], default='standard'), iops = dict(), encrypted = dict(), device_name = dict(), @@ -352,6 +368,7 @@ def main(): name = module.params.get('name') instance = module.params.get('instance') volume_size = module.params.get('volume_size') + volume_type = module.params.get('volume_type') iops = module.params.get('iops') encrypted = module.params.get('encrypted') device_name = module.params.get('device_name') @@ -359,6 +376,9 @@ def main(): snapshot = module.params.get('snapshot') state = module.params.get('state') + if instance == 'None' or instance == '': + instance = None + ec2 = ec2_connect(module) if state == 'list': @@ -425,7 +445,9 @@ def main(): volume = create_volume(module, ec2, zone) if instance: attach_volume(module, ec2, volume, inst) - 
module.exit_json(volume_id=volume.id, device=device_name) + else: + detach_volume(module, ec2) + module.exit_json(volume_id=volume.id, device=device_name, volume_type=volume.type) # import module snippets from ansible.module_utils.basic import * diff --git a/cloud/ec2_vpc.py b/cloud/amazon/ec2_vpc.py similarity index 99% rename from cloud/ec2_vpc.py rename to cloud/amazon/ec2_vpc.py index e4dc9a65f7d..00528f27849 100644 --- a/cloud/ec2_vpc.py +++ b/cloud/amazon/ec2_vpc.py @@ -130,16 +130,14 @@ EXAMPLES = ''' # It is assumed that their matching environment variables are set. # Basic creation example: - local_action: - module: ec2_vpc + ec2_vpc: state: present cidr_block: 172.23.0.0/16 resource_tags: { "Environment":"Development" } region: us-west-2 # Full creation example with subnets and optional availability zones. # The absence or presence of subnets deletes or creates them respectively. - local_action: - module: ec2_vpc + ec2_vpc: state: present cidr_block: 172.22.0.0/16 resource_tags: { "Environment":"Development" } @@ -170,8 +168,7 @@ EXAMPLES = ''' register: vpc # Removal of a VPC by id - local_action: - module: ec2_vpc + ec2_vpc: state: absent vpc_id: vpc-aaaaaaa region: us-west-2 diff --git a/cloud/elasticache.py b/cloud/amazon/elasticache.py similarity index 99% rename from cloud/elasticache.py rename to cloud/amazon/elasticache.py index 8c82f2fcc20..c1846f525a8 100644 --- a/cloud/elasticache.py +++ b/cloud/amazon/elasticache.py @@ -111,8 +111,7 @@ EXAMPLES = """ # It is assumed that their matching environment variables are set. # Basic example -- local_action: - module: elasticache +- elasticache: name: "test-please-delete" state: present engine: memcached @@ -126,14 +125,12 @@ EXAMPLES = """ # Ensure cache cluster is gone -- local_action: - module: elasticache +- elasticache: name: "test-please-delete" state: absent # Reboot cache cluster -- local_action: - module: elasticache +- elasticache: name: "test-please-delete" state: rebooted diff --git a/cloud/rds.py b/cloud/amazon/rds.py similarity index 97% rename from cloud/rds.py rename to cloud/amazon/rds.py index ba3f1e38d39..d6fd1622161 100644 --- a/cloud/rds.py +++ b/cloud/amazon/rds.py @@ -224,44 +224,45 @@ requirements: [ "boto" ] author: Bruce Pennypacker ''' +# FIXME: the command stuff needs a 'state' like alias to make things consistent -- MPD + EXAMPLES = ''' # Basic mysql provisioning example -- rds: > - command=create - instance_name=new_database - db_engine=MySQL - size=10 - instance_type=db.m1.small - username=mysql_admin - password=1nsecure +- rds: + command: create + instance_name: new_database + db_engine: MySQL + size: 10 + instance_type: db.m1.small + username: mysql_admin + password: 1nsecure # Create a read-only replica and wait for it to become available -- rds: > - command=replicate - instance_name=new_database_replica - source_instance=new_database - wait=yes - wait_timeout=600 +- rds: + command: replicate + instance_name: new_database_replica + source_instance: new_database + wait: yes + wait_timeout: 600 # Delete an instance, but create a snapshot before doing so -- rds: > - command=delete - instance_name=new_database - snapshot=new_database_snapshot +- rds: + command: delete + instance_name: new_database + snapshot: new_database_snapshot # Get facts about an instance -- rds: > - command=facts - instance_name=new_database - register: new_database_facts +- rds: + command: facts + instance_name: new_database + register: new_database_facts # Rename an instance and wait for the change to take effect -- rds: > 
- command=modify - instance_name=new_database - new_instance_name=renamed_database - wait=yes - +- rds: + command: modify + instance_name: new_database + new_instance_name: renamed_database + wait: yes ''' import sys diff --git a/cloud/rds_param_group.py b/cloud/amazon/rds_param_group.py similarity index 97% rename from cloud/rds_param_group.py rename to cloud/amazon/rds_param_group.py index 39f9432057a..d1559ac78ae 100644 --- a/cloud/rds_param_group.py +++ b/cloud/amazon/rds_param_group.py @@ -85,17 +85,18 @@ author: Scott Anderson EXAMPLES = ''' # Add or change a parameter group, in this case setting auto_increment_increment to 42 * 1024 -- rds_param_group: > - state=present - name=norwegian_blue - description=My Fancy Ex Parrot Group - engine=mysql5.6 - params='{"auto_increment_increment": "42K"}' +- rds_param_group: + state: present + name: norwegian_blue + description: 'My Fancy Ex Parrot Group' + engine: 'mysql5.6' + params: + auto_increment_increment: "42K" # Remove a parameter group -- rds_param_group: > - state=absent - name=norwegian_blue +- rds_param_group: + state: absent + name: norwegian_blue ''' import sys diff --git a/cloud/rds_subnet_group.py b/cloud/amazon/rds_subnet_group.py similarity index 97% rename from cloud/rds_subnet_group.py rename to cloud/amazon/rds_subnet_group.py index 552c94f188a..9731154f77c 100644 --- a/cloud/rds_subnet_group.py +++ b/cloud/amazon/rds_subnet_group.py @@ -71,8 +71,7 @@ author: Scott Anderson EXAMPLES = ''' # Add or change a subnet group -- local_action: - module: rds_subnet_group +- rds_subnet_group: state: present name: norwegian-blue description: My Fancy Ex Parrot Subnet Group @@ -80,10 +79,10 @@ EXAMPLES = ''' - subnet-aaaaaaaa - subnet-bbbbbbbb -# Remove a parameter group -- rds_param_group: > - state=absent - name=norwegian-blue +# Remove a subnet group +- rds_subnet_group: + state: absent + name: norwegian-blue ''' import sys diff --git a/cloud/route53.py b/cloud/amazon/route53.py similarity index 90% rename from cloud/route53.py rename to cloud/amazon/route53.py index 0d7fdcbade5..7fbe8552f41 100644 --- a/cloud/route53.py +++ b/cloud/amazon/route53.py @@ -88,51 +88,54 @@ requirements: [ "boto" ] author: Bruce Pennypacker ''' +# FIXME: the command stuff should have a more state-like configuration alias -- MPD + EXAMPLES = ''' # Add new.foo.com as an A record with 3 IPs -- route53: > - command=create - zone=foo.com - record=new.foo.com - type=A - ttl=7200 - value=1.1.1.1,2.2.2.2,3.3.3.3 +- route53: + command: create + zone: foo.com + record: new.foo.com + type: A + ttl: 7200 + value: 1.1.1.1,2.2.2.2,3.3.3.3 # Retrieve the details for new.foo.com -- route53: > - command=get - zone=foo.com - record=new.foo.com - type=A +- route53: + command: get + zone: foo.com + record: new.foo.com + type: A register: rec # Delete new.foo.com A record using the results from the get command -- route53: > - command=delete - zone=foo.com - record={{ rec.set.record }} - type={{ rec.set.type }} - value={{ rec.set.value }} +- route53: + command: delete + zone: foo.com + record: "{{ rec.set.record }}" + ttl: "{{ rec.set.ttl }}" + type: "{{ rec.set.type }}" + value: "{{ rec.set.value }}" # Add an AAAA record. Note that because there are colons in the value, # the entire parameter list must be quoted: -- route53: > - command=create - zone=foo.com - record=localhost.foo.com - type=AAAA - ttl=7200 - value="::1" +- route53: + command: "create" + zone: "foo.com" + record: "localhost.foo.com" + type: "AAAA" + ttl: "7200" + value: "::1" # Add a TXT record.
Note that TXT and SPF records must be surrounded # by quotes when sent to Route 53: -- route53: > - command=create - zone=foo.com - record=localhost.foo.com - type=TXT - ttl=7200 - value="\"bar\"" +- route53: + command: "create" + zone: "foo.com" + record: "localhost.foo.com" + type: "TXT" + ttl: "7200" + value: '"bar"' ''' @@ -160,7 +163,7 @@ def commit(changes, retry_interval): code = code.split("</Code>")[0] if code != 'PriorRequestNotComplete' or retry < 0: raise e - time.sleep(retry_interval) + time.sleep(float(retry_interval)) def main(): argument_spec = ec2_argument_spec() diff --git a/cloud/s3.py b/cloud/amazon/s3.py similarity index 92% rename from cloud/s3.py rename to cloud/amazon/s3.py index 6438c6405e7..7b914dd9117 100644 --- a/cloud/s3.py +++ b/cloud/amazon/s3.py @@ -68,7 +68,7 @@ options: aliases: [] s3_url: description: - - "S3 URL endpoint. If not specified then the S3_URL environment variable is used, if that variable is defined. Ansible tries to guess if fakes3 (https://github.com/jubos/fake-s3) or Eucalyptus Walrus (https://github.com/eucalyptus/eucalyptus/wiki/Walrus) is used and configure connection accordingly. Current heuristic is: everything with scheme fakes3:// is fakes3, everything else not ending with amazonaws.com is Walrus." + - "S3 URL endpoint for usage with Eucalyptus, fakes3, etc. Otherwise assumes AWS." default: null aliases: [ S3_URL ] aws_secret_key: @@ -103,28 +103,19 @@ author: Lester Wade, Ralph Tice EXAMPLES = ''' # Simple PUT operation - s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put + # Simple GET operation - s3: bucket=mybucket object=/my/desired/key.txt dest=/usr/local/myfile.txt mode=get -# GET/download and overwrite local file (trust remote) -- s3: bucket=mybucket object=/my/desired/key.txt dest=/usr/local/myfile.txt mode=get -# GET/download and do not overwrite local file (trust remote) -- s3: bucket=mybucket object=/my/desired/key.txt dest=/usr/local/myfile.txt mode=get force=false -# PUT/upload and overwrite remote file (trust local) -- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put + # PUT/upload with metadata -- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put metadata='Content-Encoding=gzip' -# PUT/upload with multiple metadata - s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put metadata='Content-Encoding=gzip,Cache-Control=no-cache' -# PUT/upload and do not overwrite remote file (trust local) -- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put force=false -# Download an object as a string to use else where in your playbook -- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=getstr + # Create an empty bucket - s3: bucket=mybucket mode=create -# Create a bucket with key as directory -- s3: bucket=mybucket object=/my/directory/path mode=create -# Create an empty bucket in the EU region -- s3: bucket=mybucket mode=create region=eu-west-1 + +# Create a bucket with key as directory, in the EU region +- s3: bucket=mybucket object=/my/directory/path mode=create region=eu-west-1 + # Delete a bucket and all contents - s3: bucket=mybucket mode=delete ''' diff --git a/cloud/azure/__init__.py b/cloud/azure/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cloud/azure.py b/cloud/azure/azure.py similarity index 100% rename from cloud/azure.py rename to cloud/azure/azure.py diff --git a/cloud/digital_ocean/__init__.py
b/cloud/digital_ocean/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cloud/digital_ocean.py b/cloud/digital_ocean/digital_ocean.py similarity index 100% rename from cloud/digital_ocean.py rename to cloud/digital_ocean/digital_ocean.py diff --git a/cloud/digital_ocean_domain.py b/cloud/digital_ocean/digital_ocean_domain.py similarity index 97% rename from cloud/digital_ocean_domain.py rename to cloud/digital_ocean/digital_ocean_domain.py index d0615ad0df0..1086a6bab16 100644 --- a/cloud/digital_ocean_domain.py +++ b/cloud/digital_ocean/digital_ocean_domain.py @@ -27,7 +27,7 @@ options: description: - Indicate desired state of the target. default: present - choices: ['present', 'active', 'absent', 'deleted'] + choices: ['present', 'absent'] client_id: description: - DigitalOcean manager id. @@ -145,7 +145,7 @@ class Domain(JsonfyMixIn): return False domains = Domain.list_all() - + if id is not None: for domain in domains: if domain.id == id: @@ -181,7 +181,7 @@ def core(module): if not domain: domain = Domain.find(name=getkeyordie("name")) - + if not domain: domain = Domain.add(getkeyordie("name"), getkeyordie("ip")) @@ -203,10 +203,10 @@ def core(module): domain = None if "id" in module.params: domain = Domain.find(id=module.params["id"]) - + if not domain and "name" in module.params: domain = Domain.find(name=module.params["name"]) - + if not domain: module.exit_json(changed=False, msg="Domain not found.") @@ -217,7 +217,7 @@ def core(module): def main(): module = AnsibleModule( argument_spec = dict( - state = dict(choices=['active', 'present', 'absent', 'deleted'], default='present'), + state = dict(choices=['present', 'absent'], default='present'), client_id = dict(aliases=['CLIENT_ID'], no_log=True), api_key = dict(aliases=['API_KEY'], no_log=True), name = dict(type='str'), diff --git a/cloud/digital_ocean_sshkey.py b/cloud/digital_ocean/digital_ocean_sshkey.py similarity index 100% rename from cloud/digital_ocean_sshkey.py rename to cloud/digital_ocean/digital_ocean_sshkey.py diff --git a/cloud/docker/__init__.py b/cloud/docker/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cloud/docker_image.py b/cloud/docker/_docker_image.py similarity index 99% rename from cloud/docker_image.py rename to cloud/docker/_docker_image.py index e1388f20f1a..726895c8df7 100644 --- a/cloud/docker_image.py +++ b/cloud/docker/_docker_image.py @@ -23,6 +23,7 @@ DOCUMENTATION = ''' --- module: docker_image +deprecated: "functions are being rolled into the 'docker' module" author: Pavel Antonov version_added: "1.5" short_description: manage docker images diff --git a/cloud/docker.py b/cloud/docker/docker.py similarity index 80% rename from cloud/docker.py rename to cloud/docker/docker.py index a0a52ffc756..b9c379eed4a 100644 --- a/cloud/docker.py +++ b/cloud/docker/docker.py @@ -126,6 +126,12 @@ options: required: false default: null aliases: [] + email: + description: + - Set remote API email + required: false + default: null + aliases: [] hostname: description: - Set container hostname @@ -204,6 +210,27 @@ options: default: '' aliases: [] version_added: "1.8" + restart_policy: + description: + - Set the container restart policy + required: false + default: false + aliases: [] + version_added: "1.9" + restart_policy_retry: + description: + - Set the retry limit for container restart policy + required: false + default: false + aliases: [] + version_added: "1.9" + insecure_registry: + description: + - Use insecure private registry by HTTP instead of 
HTTPS (needed for docker-py >= 0.5.0). + required: false + default: false + aliases: [] + version_added: "1.9" author: Cove Schneider, Joshua Conner, Pavel Antonov requirements: [ "docker-py >= 0.3.0", "docker >= 0.10.0" ] @@ -336,10 +363,11 @@ try: except ImportError, e: HAS_DOCKER_PY = False -try: - from docker.errors import APIError as DockerAPIError -except ImportError: - from docker.client import APIError as DockerAPIError +if HAS_DOCKER_PY: + try: + from docker.errors import APIError as DockerAPIError + except ImportError: + from docker.client import APIError as DockerAPIError def _human_to_bytes(number): @@ -369,9 +397,81 @@ def _docker_id_quirk(inspect): del inspect['ID'] return inspect -class DockerManager: + +def get_split_image_tag(image): + # If image contains a host or org name, omit that from our check + if '/' in image: + registry, resource = image.rsplit('/', 1) + else: + registry, resource = None, image + + # now we can determine if image has a tag + if ':' in resource: + resource, tag = resource.split(':', 1) + if registry: + resource = '/'.join((registry, resource)) + else: + tag = "latest" + resource = image + + return resource, tag + +def get_docker_py_versioninfo(): + if hasattr(docker, '__version__'): + # a '__version__' attribute was added to the module but not until + # after 0.3.0 was pushed to pypi. If it's there, use it. + version = [] + for part in docker.__version__.split('.'): + try: + version.append(int(part)) + except ValueError: + for idx, char in enumerate(part): + if not char.isdigit(): + nondigit = part[idx:] + digit = part[:idx] + if digit: + version.append(int(digit)) + if nondigit: + version.append(nondigit) + elif hasattr(docker.Client, '_get_raw_response_socket'): + # HACK: if '__version__' isn't there, we check for the existence of + # `_get_raw_response_socket` in the docker.Client class, which was + # added in 0.3.0 + version = (0, 3, 0) + else: + # This is untrue but this module does not function with a version less + # than 0.3.0 so it's okay to lie here. + version = (0,) + + return tuple(version) + +def check_dependencies(module): + """ + Ensure `docker-py` >= 0.3.0 is installed, and call module.fail_json with a + helpful error message if it isn't. 
+ """ + if not HAS_DOCKER_PY: + module.fail_json(msg="`docker-py` doesn't seem to be installed, but is required for the Ansible Docker module.") + else: + versioninfo = get_docker_py_versioninfo() + if versioninfo < (0, 3, 0): + module.fail_json(msg="The Ansible Docker module requires `docker-py` >= 0.3.0.") + + +class DockerManager(object): counters = {'created':0, 'started':0, 'stopped':0, 'killed':0, 'removed':0, 'restarted':0, 'pull':0} + _capabilities = set() + # Map optional parameters to minimum (docker-py version, server APIVersion) + # docker-py version is a tuple of ints because we have to compare them + # server APIVersion is passed to a docker-py function that takes strings + _cap_ver_req = { + 'dns': ((0, 3, 0), '1.10'), + 'volumes_from': ((0, 3, 0), '1.10'), + 'restart_policy': ((0, 5, 0), '1.14'), + # Clientside only + 'insecure_registry': ((0, 5, 0), '0.0') + } def __init__(self, module): self.module = module @@ -424,8 +524,50 @@ class DockerManager: # connect to docker server docker_url = urlparse(module.params.get('docker_url')) docker_api_version = module.params.get('docker_api_version') + if not docker_api_version: + docker_api_version=docker.client.DEFAULT_DOCKER_API_VERSION self.client = docker.Client(base_url=docker_url.geturl(), version=docker_api_version) + self.docker_py_versioninfo = get_docker_py_versioninfo() + + def _check_capabilties(self): + """ + Create a list of available capabilities + """ + api_version = self.client.version()['ApiVersion'] + for cap, req_vers in self._cap_ver_req.items(): + if (self.docker_py_versioninfo >= req_vers[0] and + docker.utils.compare_version(req_vers[1], api_version) >= 0): + self._capabilities.add(cap) + + def ensure_capability(self, capability, fail=True): + """ + Some of the functionality this ansible module implements are only + available in newer versions of docker. Ensure that the capability + is available here. + + If fail is set to False then return True or False depending on whether + we have the capability. Otherwise, simply fail and exit the module if + we lack the capability. + """ + if not self._capabilities: + self._check_capabilties() + + if capability in self._capabilities: + return True + + if not fail: + return False + + api_version = self.client.version()['ApiVersion'] + self.module.fail_json(msg='Specifying the `%s` parameter requires' + ' docker-py: %s, docker server apiversion %s; found' + ' docker-py: %s, server: %s' % ( + capability, + '.'.join(self._cap_ver_req[capability][0]), + self._cap_ver_req[capability][1], + '.'.join(self.docker_py_versioninfo), + api_version)) def get_links(self, links): """ @@ -505,24 +647,6 @@ class DockerManager: return binds - def get_split_image_tag(self, image): - # If image contains a host or org name, omit that from our check - if '/' in image: - registry, resource = image.rsplit('/', 1) - else: - registry, resource = None, image - - # now we can determine if image has a tag - if ':' in resource: - resource, tag = resource.split(':', 1) - if registry: - resource = '/'.join((registry, resource)) - else: - tag = "latest" - resource = image - - return resource, tag - def get_summary_counters_msg(self): msg = "" for k, v in self.counters.iteritems(): @@ -562,10 +686,10 @@ class DockerManager: # if we weren't given a tag with the image, we need to only compare on the image name, as that # docker will give us back the full image name including a tag in the container list if one exists. 
- image, tag = self.get_split_image_tag(image) + image, tag = get_split_image_tag(image) for i in self.client.containers(all=True): - running_image, running_tag = self.get_split_image_tag(i['Image']) + running_image, running_tag = get_split_image_tag(i['Image']) running_command = i['Command'].strip() name_matches = False @@ -604,11 +728,20 @@ class DockerManager: 'name': self.module.params.get('name'), 'stdin_open': self.module.params.get('stdin_open'), 'tty': self.module.params.get('tty'), + 'dns': self.module.params.get('dns'), + 'volumes_from': self.module.params.get('volumes_from'), } - if docker.utils.compare_version('1.10', self.client.version()['ApiVersion']) < 0: - params['dns'] = self.module.params.get('dns') - params['volumes_from'] = self.module.params.get('volumes_from') + if params['dns'] is not None: + self.ensure_capability('dns') + + if params['volumes_from'] is not None: + self.ensure_capability('volumes_from') + + extra_params = {} + if self.module.params.get('insecure_registry'): + if self.ensure_capability('insecure_registry', fail=False): + extra_params['insecure_registry'] = self.module.params.get('insecure_registry') def do_create(count, params): results = [] @@ -623,7 +756,7 @@ class DockerManager: containers = do_create(count, params) except: resource = self.module.params.get('image') - image, tag = self.get_split_image_tag(resource) + image, tag = get_split_image_tag(resource) if self.module.params.get('username'): try: self.client.login( @@ -635,7 +768,7 @@ class DockerManager: except: self.module.fail_json(msg="failed to login to the remote registry, check your username/password.") try: - self.client.pull(image, tag=tag) + self.client.pull(image, tag=tag, **extra_params) except: self.module.fail_json(msg="failed to pull the specified image: %s" % resource) self.increment_counter('pull') @@ -653,9 +786,24 @@ class DockerManager: 'links': self.links, 'network_mode': self.module.params.get('net'), } - if docker.utils.compare_version('1.10', self.client.version()['ApiVersion']) >= 0 and hasattr(docker, '__version__') and docker.__version__ > '0.3.0': - params['dns'] = self.module.params.get('dns') - params['volumes_from'] = self.module.params.get('volumes_from') + + optionals = {} + for optional_param in ('dns', 'volumes_from', 'restart_policy', 'restart_policy_retry'): + optionals[optional_param] = self.module.params.get(optional_param) + + if optionals['dns'] is not None: + self.ensure_capability('dns') + params['dns'] = optionals['dns'] + + if optionals['volumes_from'] is not None: + self.ensure_capability('volumes_from') + params['volumes_from'] = optionals['volumes_from'] + + if optionals['restart_policy'] is not None: + self.ensure_capability('restart_policy') + params['restart_policy'] = { 'Name': optionals['restart_policy'] } + if params['restart_policy']['Name'] == 'on-failure': + params['restart_policy']['MaximumRetryCount'] = optionals['restart_policy_retry'] for i in containers: self.client.start(i['Id'], **params) @@ -684,31 +832,6 @@ class DockerManager: self.increment_counter('restarted') -def check_dependencies(module): - """ - Ensure `docker-py` >= 0.3.0 is installed, and call module.fail_json with a - helpful error message if it isn't. 
- """ - if not HAS_DOCKER_PY: - module.fail_json(msg="`docker-py` doesn't seem to be installed, but is required for the Ansible Docker module.") - else: - HAS_NEW_ENOUGH_DOCKER_PY = False - if hasattr(docker, '__version__'): - # a '__version__' attribute was added to the module but not until - # after 0.3.0 was added pushed to pip. If it's there, use it. - if docker.__version__ >= '0.3.0': - HAS_NEW_ENOUGH_DOCKER_PY = True - else: - # HACK: if '__version__' isn't there, we check for the existence of - # `_get_raw_response_socket` in the docker.Client class, which was - # added in 0.3.0 - if hasattr(docker.Client, '_get_raw_response_socket'): - HAS_NEW_ENOUGH_DOCKER_PY = True - - if not HAS_NEW_ENOUGH_DOCKER_PY: - module.fail_json(msg="The Ansible Docker module requires `docker-py` >= 0.3.0.") - - def main(): module = AnsibleModule( argument_spec = dict( @@ -724,7 +847,7 @@ def main(): memory_limit = dict(default=0), memory_swap = dict(default=0), docker_url = dict(default='unix://var/run/docker.sock'), - docker_api_version = dict(default=docker.client.DEFAULT_DOCKER_API_VERSION), + docker_api_version = dict(), username = dict(default=None), password = dict(), email = dict(), @@ -734,13 +857,16 @@ def main(): dns = dict(), detach = dict(default=True, type='bool'), state = dict(default='running', choices=['absent', 'present', 'running', 'stopped', 'killed', 'restarted']), + restart_policy = dict(default=None, choices=['always', 'on-failure', 'no']), + restart_policy_retry = dict(default=0, type='int'), debug = dict(default=False, type='bool'), privileged = dict(default=False, type='bool'), stdin_open = dict(default=False, type='bool'), tty = dict(default=False, type='bool'), lxc_conf = dict(default=None, type='list'), name = dict(default=None), - net = dict(default=None) + net = dict(default=None), + insecure_registry = dict(default=False, type='bool'), ) ) @@ -851,4 +977,5 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/cloud/google/__init__.py b/cloud/google/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cloud/gc_storage.py b/cloud/google/gc_storage.py similarity index 99% rename from cloud/gc_storage.py rename to cloud/google/gc_storage.py index 1963a148da2..28beea05783 100644 --- a/cloud/gc_storage.py +++ b/cloud/google/gc_storage.py @@ -319,11 +319,12 @@ def handle_create(module, gs, bucket, obj): else: module.exit_json(msg="Bucket created successfully", changed=create_bucket(module, gs, bucket)) if bucket and obj: + if obj.endswith('/'): + dirobj = obj + else: + dirobj = obj + "/" + if bucket_check(module, gs, bucket): - if obj.endswith('/'): - dirobj = obj - else: - dirobj = obj + "/" if key_check(module, gs, bucket, dirobj): module.exit_json(msg="Bucket %s and key %s already exists."% (bucket, obj), changed=False) else: diff --git a/cloud/gce.py b/cloud/google/gce.py old mode 100755 new mode 100644 similarity index 100% rename from cloud/gce.py rename to cloud/google/gce.py diff --git a/cloud/gce_lb.py b/cloud/google/gce_lb.py similarity index 100% rename from cloud/gce_lb.py rename to cloud/google/gce_lb.py diff --git a/cloud/gce_net.py b/cloud/google/gce_net.py similarity index 98% rename from cloud/gce_net.py rename to cloud/google/gce_net.py index c2c0b30452d..10592d20033 100644 --- a/cloud/gce_net.py +++ b/cloud/google/gce_net.py @@ -35,7 +35,7 @@ options: description: - the protocol:ports to allow ('tcp:80' or 'tcp:80,443' or 'tcp:80-800') required: false - 
default: null + default: null aliases: [] ipv4_range: description: @@ -101,15 +101,16 @@ author: Eric Johnson EXAMPLES = ''' # Simple example of creating a new network -- local_action: +- local_action: module: gce_net name: privatenet ipv4_range: '10.240.16.0/24' - + # Simple example of creating a new firewall rule -- local_action: +- local_action: module: gce_net name: privatenet + fwname: all-web-webproxy allowed: tcp:80,8080 src_tags: ["web", "proxy"] @@ -155,7 +156,7 @@ def main(): ipv4_range = dict(), fwname = dict(), name = dict(), - src_range = dict(), + src_range = dict(type='list'), src_tags = dict(type='list'), state = dict(default='present'), service_account_email = dict(), diff --git a/cloud/gce_pd.py b/cloud/google/gce_pd.py similarity index 100% rename from cloud/gce_pd.py rename to cloud/google/gce_pd.py diff --git a/cloud/linode/__init__.py b/cloud/linode/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cloud/linode.py b/cloud/linode/linode.py similarity index 100% rename from cloud/linode.py rename to cloud/linode/linode.py diff --git a/cloud/openstack/__init__.py b/cloud/openstack/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cloud/glance_image.py b/cloud/openstack/glance_image.py similarity index 99% rename from cloud/glance_image.py rename to cloud/openstack/glance_image.py index 3bbc6f0ebca..6425fa2ca5d 100644 --- a/cloud/glance_image.py +++ b/cloud/openstack/glance_image.py @@ -254,7 +254,7 @@ def main(): else: _glance_delete_image(module, module.params, client) -# this is magic, see lib/ansible/module.params['common.py +# this is magic, see lib/ansible/module_common.py from ansible.module_utils.basic import * from ansible.module_utils.openstack import * main() diff --git a/cloud/keystone_user.py b/cloud/openstack/keystone_user.py similarity index 99% rename from cloud/keystone_user.py rename to cloud/openstack/keystone_user.py index 5b412ca8008..4af254bfe6d 100644 --- a/cloud/keystone_user.py +++ b/cloud/openstack/keystone_user.py @@ -291,6 +291,9 @@ def main(): argument_spec.update(dict( tenant_description=dict(required=False), email=dict(required=False), + user=dict(required=False), + tenant=dict(required=False), + password=dict(required=False), role=dict(required=False), state=dict(default='present', choices=['present', 'absent']), endpoint=dict(required=False, diff --git a/cloud/nova_compute.py b/cloud/openstack/nova_compute.py similarity index 99% rename from cloud/nova_compute.py rename to cloud/openstack/nova_compute.py index 42c54753fb8..b51a1891a7d 100644 --- a/cloud/nova_compute.py +++ b/cloud/openstack/nova_compute.py @@ -121,10 +121,10 @@ options: description: - Should a floating ip be auto created and assigned required: false - default: 'yes' + default: 'no' version_added: "1.8" floating_ips: - decription: + description: - list of valid floating IPs that pre-exist to assign to this node required: false default: None @@ -405,7 +405,7 @@ def _get_flavor_id(module, nova): if (flavor.ram >= module.params['flavor_ram'] and (not module.params['flavor_include'] or module.params['flavor_include'] in flavor.name)): return flavor.id - module.fail_json(msg = "Error finding flavor with %sMB of RAM" % module.params['flavor_ram']) + module.fail_json(msg = "Error finding flavor with %sMB of RAM" % module.params['flavor_ram']) return module.params['flavor_id'] diff --git a/cloud/nova_keypair.py b/cloud/openstack/nova_keypair.py similarity index 100% rename from cloud/nova_keypair.py rename to 
cloud/openstack/nova_keypair.py diff --git a/cloud/quantum_floating_ip.py b/cloud/openstack/quantum_floating_ip.py similarity index 100% rename from cloud/quantum_floating_ip.py rename to cloud/openstack/quantum_floating_ip.py diff --git a/cloud/quantum_floating_ip_associate.py b/cloud/openstack/quantum_floating_ip_associate.py similarity index 100% rename from cloud/quantum_floating_ip_associate.py rename to cloud/openstack/quantum_floating_ip_associate.py diff --git a/cloud/quantum_network.py b/cloud/openstack/quantum_network.py similarity index 100% rename from cloud/quantum_network.py rename to cloud/openstack/quantum_network.py diff --git a/cloud/quantum_router.py b/cloud/openstack/quantum_router.py similarity index 100% rename from cloud/quantum_router.py rename to cloud/openstack/quantum_router.py diff --git a/cloud/quantum_router_gateway.py b/cloud/openstack/quantum_router_gateway.py similarity index 100% rename from cloud/quantum_router_gateway.py rename to cloud/openstack/quantum_router_gateway.py diff --git a/cloud/quantum_router_interface.py b/cloud/openstack/quantum_router_interface.py similarity index 100% rename from cloud/quantum_router_interface.py rename to cloud/openstack/quantum_router_interface.py diff --git a/cloud/quantum_subnet.py b/cloud/openstack/quantum_subnet.py similarity index 100% rename from cloud/quantum_subnet.py rename to cloud/openstack/quantum_subnet.py diff --git a/cloud/rackspace/__init__.py b/cloud/rackspace/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cloud/rax.py b/cloud/rackspace/rax.py similarity index 78% rename from cloud/rax.py rename to cloud/rackspace/rax.py index e01367ed5bd..5fa1b57386a 100644 --- a/cloud/rax.py +++ b/cloud/rackspace/rax.py @@ -64,7 +64,10 @@ options: exact_count: description: - Explicitly ensure an exact count of instances, used with - state=active/present + state=active/present. If specified as C(yes) and I(count) is less than + the servers matched, servers will be deleted to match the count. If + the number of matched servers is fewer than specified in I(count) + additional servers will be added. default: no choices: - "yes" @@ -150,6 +153,12 @@ options: - how long before wait gives up, in seconds default: 300 author: Jesse Keating, Matt Martz +notes: + - I(exact_count) can be "destructive" if the number of running servers in + the I(group) is larger than that specified in I(count). In such a case, the + I(state) is effectively set to C(absent) and the extra servers are deleted. + In the case of deletion, the returned data structure will have C(action) + set to C(delete), and the oldest servers in the group will be deleted. 
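The exact_count behaviour described in these notes reduces to comparing the number of servers matched in the group against I(count). A schematic sketch of that decision, using a hypothetical helper rather than the module's actual code:

def exact_count_action(matched, count):
    # More servers than requested: state is effectively 'absent' and the
    # oldest extras are deleted.
    if matched > count:
        return ('delete', matched - count)
    # Fewer servers than requested: additional servers are created.
    if matched < count:
        return ('create', count - matched)
    # Exact match: nothing to do, just report the existing servers.
    return (None, 0)

assert exact_count_action(5, 3) == ('delete', 2)   # the "destructive" case
assert exact_count_action(1, 3) == ('create', 2)
assert exact_count_action(3, 3) == (None, 0)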
extends_documentation_fragment: rackspace.openstack ''' @@ -441,79 +450,102 @@ def cloudservers(module, state=None, name=None, flavor=None, image=None, if group is None: module.fail_json(msg='"group" must be provided when using ' '"exact_count"') - else: - if auto_increment: - numbers = set() - try: - name % 0 - except TypeError, e: - if e.message.startswith('not all'): - name = '%s%%d' % name - else: - module.fail_json(msg=e.message) + if auto_increment: + numbers = set() - pattern = re.sub(r'%\d*[sd]', r'(\d+)', name) - for server in cs.servers.list(): - if server.metadata.get('group') == group: - servers.append(server) - match = re.search(pattern, server.name) - if match: - number = int(match.group(1)) - numbers.add(number) - - number_range = xrange(count_offset, count_offset + count) - available_numbers = list(set(number_range) - .difference(numbers)) - else: - for server in cs.servers.list(): - if server.metadata.get('group') == group: - servers.append(server) - - # If state was absent but the count was changed, - # assume we only wanted to remove that number of instances - if was_absent: - diff = len(servers) - count - if diff < 0: - count = 0 + # See if the name is a printf like string, if not append + # %d to the end + try: + name % 0 + except TypeError, e: + if e.message.startswith('not all'): + name = '%s%%d' % name else: - count = diff + module.fail_json(msg=e.message) - if len(servers) > count: - state = 'absent' - kept = servers[:count] - del servers[:count] - instance_ids = [] - for server in servers: - instance_ids.append(server.id) - delete(module, instance_ids=instance_ids, wait=wait, - wait_timeout=wait_timeout, kept=kept) - elif len(servers) < count: - if auto_increment: - names = [] - name_slice = count - len(servers) - numbers_to_use = available_numbers[:name_slice] - for number in numbers_to_use: - names.append(name % number) - else: - names = [name] * (count - len(servers)) + # regex pattern to match printf formatting + pattern = re.sub(r'%\d*[sd]', r'(\d+)', name) + for server in cs.servers.list(): + # Ignore DELETED servers + if server.status == 'DELETED': + continue + if server.metadata.get('group') == group: + servers.append(server) + match = re.search(pattern, server.name) + if match: + number = int(match.group(1)) + numbers.add(number) + + number_range = xrange(count_offset, count_offset + count) + available_numbers = list(set(number_range) + .difference(numbers)) + else: # Not auto incrementing + for server in cs.servers.list(): + # Ignore DELETED servers + if server.status == 'DELETED': + continue + if server.metadata.get('group') == group: + servers.append(server) + # available_numbers not needed here, we inspect auto_increment + # again later + + # If state was absent but the count was changed, + # assume we only wanted to remove that number of instances + if was_absent: + diff = len(servers) - count + if diff < 0: + count = 0 else: - instances = [] - instance_ids = [] - for server in servers: - instances.append(rax_to_dict(server, 'server')) - instance_ids.append(server.id) - module.exit_json(changed=False, action=None, - instances=instances, - success=[], error=[], timeout=[], - instance_ids={'instances': instance_ids, - 'success': [], 'error': [], - 'timeout': []}) - else: + count = diff + + if len(servers) > count: + # We have more servers than we need, set state='absent' + # and delete the extras, this should delete the oldest + state = 'absent' + kept = servers[:count] + del servers[:count] + instance_ids = [] + for server in servers: + 
instance_ids.append(server.id) + delete(module, instance_ids=instance_ids, wait=wait, + wait_timeout=wait_timeout, kept=kept) + elif len(servers) < count: + # we have fewer servers than we need + if auto_increment: + # auto incrementing server numbers + names = [] + name_slice = count - len(servers) + numbers_to_use = available_numbers[:name_slice] + for number in numbers_to_use: + names.append(name % number) + else: + # We are not auto incrementing server numbers, + # create a list of 'name' that matches how many we need + names = [name] * (count - len(servers)) + else: + # we have the right number of servers, just return info + # about all of the matched servers + instances = [] + instance_ids = [] + for server in servers: + instances.append(rax_to_dict(server, 'server')) + instance_ids.append(server.id) + module.exit_json(changed=False, action=None, + instances=instances, + success=[], error=[], timeout=[], + instance_ids={'instances': instance_ids, + 'success': [], 'error': [], + 'timeout': []}) + else: # not called with exact_count=True if group is not None: if auto_increment: + # we are auto incrementing server numbers, but not with + # exact_count numbers = set() + # See if the name is a printf like string, if not append + # %d to the end try: name % 0 except TypeError, e: @@ -522,8 +554,12 @@ def cloudservers(module, state=None, name=None, flavor=None, image=None, else: module.fail_json(msg=e.message) + # regex pattern to match printf formatting pattern = re.sub(r'%\d*[sd]', r'(\d+)', name) for server in cs.servers.list(): + # Ignore DELETED servers + if server.status == 'DELETED': + continue if server.metadata.get('group') == group: servers.append(server) match = re.search(pattern, server.name) @@ -540,8 +576,11 @@ def cloudservers(module, state=None, name=None, flavor=None, image=None, for number in numbers_to_use: names.append(name % number) else: + # Not auto incrementing names = [name] * count else: + # No group was specified, and not using exact_count + # Perform more simplistic matching search_opts = { 'name': '^%s$' % name, 'image': image, @@ -549,11 +588,18 @@ def cloudservers(module, state=None, name=None, flavor=None, image=None, } servers = [] for server in cs.servers.list(search_opts=search_opts): + # Ignore DELETED servers + if server.status == 'DELETED': + continue + # Ignore servers with non matching metadata if server.metadata != meta: continue servers.append(server) if len(servers) >= count: + # We have more servers than were requested, don't do + # anything. 
Not running with exact_count=True, so we assume + # more is OK instances = [] for server in servers: instances.append(rax_to_dict(server, 'server')) @@ -566,6 +612,8 @@ def cloudservers(module, state=None, name=None, flavor=None, image=None, 'success': [], 'error': [], 'timeout': []}) + # We need more servers to reach out target, create names for + # them, we aren't performing auto_increment here names = [name] * (count - len(servers)) create(module, names=names, flavor=flavor, image=image, @@ -577,6 +625,8 @@ def cloudservers(module, state=None, name=None, flavor=None, image=None, elif state == 'absent': if instance_ids is None: + # We weren't given an explicit list of server IDs to delete + # Let's match instead for arg, value in dict(name=name, flavor=flavor, image=image).iteritems(): if not value: @@ -588,10 +638,15 @@ def cloudservers(module, state=None, name=None, flavor=None, image=None, 'flavor': flavor } for server in cs.servers.list(search_opts=search_opts): + # Ignore DELETED servers + if server.status == 'DELETED': + continue + # Ignore servers with non matching metadata if meta != server.metadata: continue servers.append(server) + # Build a list of server IDs to delete instance_ids = [] for server in servers: if len(instance_ids) < count: @@ -600,6 +655,8 @@ def cloudservers(module, state=None, name=None, flavor=None, image=None, break if not instance_ids: + # No server IDs were matched for deletion, or no IDs were + # explicitly provided, just exit and don't do anything module.exit_json(changed=False, action=None, instances=[], success=[], error=[], timeout=[], instance_ids={'instances': [], diff --git a/cloud/rax_cbs.py b/cloud/rackspace/rax_cbs.py similarity index 94% rename from cloud/rax_cbs.py rename to cloud/rackspace/rax_cbs.py index a1b6ce46a6e..261168889cc 100644 --- a/cloud/rax_cbs.py +++ b/cloud/rackspace/rax_cbs.py @@ -108,10 +108,6 @@ except ImportError: def cloud_block_storage(module, state, name, description, meta, size, snapshot_id, volume_type, wait, wait_timeout): - for arg in (state, name, size, volume_type): - if not arg: - module.fail_json(msg='%s is required for rax_cbs' % arg) - if size < 100: module.fail_json(msg='"size" must be greater than or equal to 100') @@ -145,10 +141,7 @@ def cloud_block_storage(module, state, name, description, meta, size, attempts=attempts) volume.get() - for key, value in vars(volume).iteritems(): - if (isinstance(value, NON_CALLABLES) and - not key.startswith('_')): - instance[key] = value + instance = rax_to_dict(volume) result = dict(changed=changed, volume=instance) @@ -164,6 +157,7 @@ def cloud_block_storage(module, state, name, description, meta, size, elif state == 'absent': if volume: + instance = rax_to_dict(volume) try: volume.delete() changed = True diff --git a/cloud/rax_cbs_attachments.py b/cloud/rackspace/rax_cbs_attachments.py similarity index 92% rename from cloud/rax_cbs_attachments.py rename to cloud/rackspace/rax_cbs_attachments.py index 365f93cd6e2..870b8e611df 100644 --- a/cloud/rax_cbs_attachments.py +++ b/cloud/rackspace/rax_cbs_attachments.py @@ -90,11 +90,6 @@ except ImportError: def cloud_block_storage_attachments(module, state, volume, server, device, wait, wait_timeout): - for arg in (state, volume, server, device): - if not arg: - module.fail_json(msg='%s is required for rax_cbs_attachments' % - arg) - cbs = pyrax.cloud_blockstorage cs = pyrax.cloudservers @@ -133,7 +128,7 @@ def cloud_block_storage_attachments(module, state, volume, server, device, not key.startswith('_')): instance[key] = value 
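The vars()-filtering loop kept as context above is the pattern that rax_to_dict() replaces throughout these rax modules: copy every public, non-callable attribute of a pyrax object into a plain dict for module output. A rough standalone equivalent, assuming NON_CALLABLES is the tuple of simple types the shared rax support code defines:

# NON_CALLABLES here is an assumption matching how these modules use it.
NON_CALLABLES = (basestring, bool, dict, int, list, type(None))

def rax_to_dict(obj):
    # Serialize a pyrax object for module output: keep only plain,
    # non-private attributes.
    instance = {}
    for key, value in vars(obj).iteritems():
        if isinstance(value, NON_CALLABLES) and not key.startswith('_'):
            instance[key] = value
    return instance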
- result = dict(changed=changed, volume=instance) + result = dict(changed=changed) if volume.status == 'error': result['msg'] = '%s failed to build' % volume.id @@ -142,6 +137,9 @@ def cloud_block_storage_attachments(module, state, volume, server, device, pyrax.utils.wait_until(volume, 'status', 'in-use', interval=5, attempts=attempts) + volume.get() + result['volume'] = rax_to_dict(volume) + if 'msg' in result: module.fail_json(**result) else: @@ -167,12 +165,7 @@ def cloud_block_storage_attachments(module, state, volume, server, device, elif volume.attachments: module.fail_json(msg='Volume is attached to another server') - for key, value in vars(volume).iteritems(): - if (isinstance(value, NON_CALLABLES) and - not key.startswith('_')): - instance[key] = value - - result = dict(changed=changed, volume=instance) + result = dict(changed=changed, volume=rax_to_dict(volume)) if volume.status == 'error': result['msg'] = '%s failed to build' % volume.id diff --git a/cloud/rax_cdb.py b/cloud/rackspace/rax_cdb.py similarity index 100% rename from cloud/rax_cdb.py rename to cloud/rackspace/rax_cdb.py diff --git a/cloud/rax_cdb_database.py b/cloud/rackspace/rax_cdb_database.py similarity index 100% rename from cloud/rax_cdb_database.py rename to cloud/rackspace/rax_cdb_database.py diff --git a/cloud/rax_cdb_user.py b/cloud/rackspace/rax_cdb_user.py similarity index 100% rename from cloud/rax_cdb_user.py rename to cloud/rackspace/rax_cdb_user.py diff --git a/cloud/rax_clb.py b/cloud/rackspace/rax_clb.py similarity index 98% rename from cloud/rax_clb.py rename to cloud/rackspace/rax_clb.py index 7a2699709da..38baa77b6ff 100644 --- a/cloud/rax_clb.py +++ b/cloud/rackspace/rax_clb.py @@ -140,10 +140,6 @@ except ImportError: def cloud_load_balancer(module, state, name, meta, algorithm, port, protocol, vip_type, timeout, wait, wait_timeout, vip_id): - for arg in (state, name, port, protocol, vip_type): - if not arg: - module.fail_json(msg='%s is required for rax_clb' % arg) - if int(timeout) < 30: module.fail_json(msg='"timeout" must be greater than or equal to 30') @@ -257,7 +253,7 @@ def main(): algorithm=dict(choices=CLB_ALGORITHMS, default='LEAST_CONNECTIONS'), meta=dict(type='dict', default={}), - name=dict(), + name=dict(required=True), port=dict(type='int', default=80), protocol=dict(choices=CLB_PROTOCOLS, default='HTTP'), state=dict(default='present', choices=['present', 'absent']), diff --git a/cloud/rax_clb_nodes.py b/cloud/rackspace/rax_clb_nodes.py similarity index 92% rename from cloud/rax_clb_nodes.py rename to cloud/rackspace/rax_clb_nodes.py index 24325b44597..472fad19b1c 100644 --- a/cloud/rax_clb_nodes.py +++ b/cloud/rackspace/rax_clb_nodes.py @@ -150,21 +150,6 @@ def _get_node(lb, node_id=None, address=None, port=None): return None -def _is_primary(node): - """Return True if node is primary and enabled""" - return (node.type.lower() == 'primary' and - node.condition.lower() == 'enabled') - - -def _get_primary_nodes(lb): - """Return a list of primary and enabled nodes""" - nodes = [] - for node in lb.nodes: - if _is_primary(node): - nodes.append(node) - return nodes - - def main(): argument_spec = rax_argument_spec() argument_spec.update( @@ -230,13 +215,6 @@ def main(): if state == 'absent': if not node: # Removing a non-existent node module.exit_json(changed=False, state=state) - - # The API detects this as well but currently pyrax does not return a - # meaningful error message - if _is_primary(node) and len(_get_primary_nodes(lb)) == 1: - module.fail_json( - msg='At least one primary 
node has to be enabled') - try: lb.delete_node(node) result = {} @@ -299,5 +277,5 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.rax import * -### invoke the module +# invoke the module main() diff --git a/cloud/rax_dns.py b/cloud/rackspace/rax_dns.py similarity index 100% rename from cloud/rax_dns.py rename to cloud/rackspace/rax_dns.py diff --git a/cloud/rax_dns_record.py b/cloud/rackspace/rax_dns_record.py similarity index 100% rename from cloud/rax_dns_record.py rename to cloud/rackspace/rax_dns_record.py diff --git a/cloud/rax_facts.py b/cloud/rackspace/rax_facts.py similarity index 100% rename from cloud/rax_facts.py rename to cloud/rackspace/rax_facts.py diff --git a/cloud/rax_files.py b/cloud/rackspace/rax_files.py similarity index 100% rename from cloud/rax_files.py rename to cloud/rackspace/rax_files.py diff --git a/cloud/rax_files_objects.py b/cloud/rackspace/rax_files_objects.py similarity index 100% rename from cloud/rax_files_objects.py rename to cloud/rackspace/rax_files_objects.py diff --git a/cloud/rax_identity.py b/cloud/rackspace/rax_identity.py similarity index 91% rename from cloud/rax_identity.py rename to cloud/rackspace/rax_identity.py index ea40ea2ef46..47b4cb60cf0 100644 --- a/cloud/rax_identity.py +++ b/cloud/rackspace/rax_identity.py @@ -55,10 +55,6 @@ except ImportError: def cloud_identity(module, state, identity): - for arg in (state, identity): - if not arg: - module.fail_json(msg='%s is required for rax_identity' % arg) - instance = dict( authenticated=identity.authenticated, credentials=identity._creds_file @@ -79,7 +75,7 @@ def main(): argument_spec = rax_argument_spec() argument_spec.update( dict( - state=dict(default='present', choices=['present', 'absent']) + state=dict(default='present', choices=['present']) ) ) @@ -95,7 +91,7 @@ def main(): setup_rax_module(module, pyrax) - if pyrax.identity is None: + if not pyrax.identity: module.fail_json(msg='Failed to instantiate client. 
This ' 'typically indicates an invalid region or an ' 'incorrectly capitalized region name.') @@ -106,5 +102,5 @@ def main(): from ansible.module_utils.basic import * from ansible.module_utils.rax import * -### invoke the module +# invoke the module main() diff --git a/cloud/rax_keypair.py b/cloud/rackspace/rax_keypair.py similarity index 98% rename from cloud/rax_keypair.py rename to cloud/rackspace/rax_keypair.py index 591ad8c3597..8f38abc12e0 100644 --- a/cloud/rax_keypair.py +++ b/cloud/rackspace/rax_keypair.py @@ -104,7 +104,7 @@ def rax_keypair(module, name, public_key, state): keypair = {} if state == 'present': - if os.path.isfile(public_key): + if public_key and os.path.isfile(public_key): try: f = open(public_key) public_key = f.read() @@ -143,7 +143,7 @@ def main(): argument_spec = rax_argument_spec() argument_spec.update( dict( - name=dict(), + name=dict(required=True), public_key=dict(), state=dict(default='present', choices=['absent', 'present']), ) diff --git a/cloud/rax_meta.py b/cloud/rackspace/rax_meta.py similarity index 100% rename from cloud/rax_meta.py rename to cloud/rackspace/rax_meta.py diff --git a/cloud/rax_network.py b/cloud/rackspace/rax_network.py similarity index 96% rename from cloud/rax_network.py rename to cloud/rackspace/rax_network.py index bc4745a7a84..bd23f5f878d 100644 --- a/cloud/rax_network.py +++ b/cloud/rackspace/rax_network.py @@ -65,10 +65,6 @@ except ImportError: def cloud_network(module, state, label, cidr): - for arg in (state, label, cidr): - if not arg: - module.fail_json(msg='%s is required for cloud_networks' % arg) - changed = False network = None networks = [] @@ -79,6 +75,9 @@ def cloud_network(module, state, label, cidr): 'incorrectly capitalized region name.') if state == 'present': + if not cidr: + module.fail_json(msg='missing required arguments: cidr') + try: network = pyrax.cloud_networks.find_network_by_label(label) except pyrax.exceptions.NetworkNotFound: @@ -115,7 +114,7 @@ def main(): dict( state=dict(default='present', choices=['present', 'absent']), - label=dict(), + label=dict(required=True), cidr=dict() ) ) diff --git a/cloud/rax_queue.py b/cloud/rackspace/rax_queue.py similarity index 100% rename from cloud/rax_queue.py rename to cloud/rackspace/rax_queue.py diff --git a/cloud/rax_scaling_group.py b/cloud/rackspace/rax_scaling_group.py similarity index 80% rename from cloud/rax_scaling_group.py rename to cloud/rackspace/rax_scaling_group.py index d884d3c1303..64783397016 100644 --- a/cloud/rax_scaling_group.py +++ b/cloud/rackspace/rax_scaling_group.py @@ -24,6 +24,14 @@ description: - Manipulate Rackspace Cloud Autoscale Groups version_added: 1.7 options: + config_drive: + description: + - Attach read-only configuration drive to server as label config-2 + default: no + choices: + - "yes" + - "no" + version_added: 1.8 cooldown: description: - The period of time, in seconds, that must pass before any scaling can @@ -92,6 +100,11 @@ options: - present - absent default: present + user_data: + description: + - Data to be uploaded to the servers config drive. This option implies + I(config_drive). 
Can be a file path or a string + version_added: 1.8 author: Matt Martz extends_documentation_fragment: rackspace ''' @@ -118,6 +131,8 @@ EXAMPLES = ''' register: asg ''' +import base64 + try: import pyrax HAS_PYRAX = True @@ -128,17 +143,27 @@ except ImportError: def rax_asg(module, cooldown=300, disk_config=None, files={}, flavor=None, image=None, key_name=None, loadbalancers=[], meta={}, min_entities=0, max_entities=0, name=None, networks=[], - server_name=None, state='present'): + server_name=None, state='present', user_data=None, + config_drive=False): changed = False au = pyrax.autoscale - cnw = pyrax.cloud_networks - cs = pyrax.cloudservers - if not au or not cnw or not cs: + if not au: module.fail_json(msg='Failed to instantiate clients. This ' 'typically indicates an invalid region or an ' 'incorrectly capitalized region name.') + if user_data: + config_drive = True + + if user_data and os.path.isfile(user_data): + try: + f = open(user_data) + user_data = f.read() + f.close() + except Exception, e: + module.fail_json(msg='Failed to load %s' % user_data) + if state == 'present': # Normalize and ensure all metadata values are strings if meta: @@ -184,8 +209,16 @@ def rax_asg(module, cooldown=300, disk_config=None, files={}, flavor=None, lbs = [] if loadbalancers: for lb in loadbalancers: - lb_id = lb.get('id') - port = lb.get('port') + try: + lb_id = int(lb.get('id')) + except (ValueError, TypeError): + module.fail_json(msg='Load balancer ID is not an integer: ' + '%s' % lb.get('id')) + try: + port = int(lb.get('port')) + except (ValueError, TypeError): + module.fail_json(msg='Load balancer port is not an ' + 'integer: %s' % lb.get('port')) if not lb_id or not port: continue lbs.append((lb_id, port)) @@ -202,9 +235,10 @@ def rax_asg(module, cooldown=300, disk_config=None, files={}, flavor=None, launch_config_type='launch_server', server_name=server_name, image=image, flavor=flavor, disk_config=disk_config, - metadata=meta, personality=files, + metadata=meta, personality=personality, networks=nics, load_balancers=lbs, - key_name=key_name) + key_name=key_name, config_drive=config_drive, + user_data=user_data) changed = True except Exception, e: module.fail_json(msg='%s' % e.message) @@ -237,14 +271,23 @@ def rax_asg(module, cooldown=300, disk_config=None, files={}, flavor=None, if flavor != lc.get('flavor'): lc_args['flavor'] = flavor - if disk_config != lc.get('disk_config'): + disk_config = disk_config or 'AUTO' + if ((disk_config or lc.get('disk_config')) and + disk_config != lc.get('disk_config')): lc_args['disk_config'] = disk_config - if meta != lc.get('metadata'): + if (meta or lc.get('meta')) and meta != lc.get('metadata'): lc_args['metadata'] = meta - if files != lc.get('personality'): - lc_args['personality'] = files + test_personality = [] + for p in personality: + test_personality.append({ + 'path': p['path'], + 'contents': base64.b64encode(p['contents']) + }) + if ((test_personality or lc.get('personality')) and + test_personality != lc.get('personality')): + lc_args['personality'] = personality if nics != lc.get('networks'): lc_args['networks'] = nics @@ -256,6 +299,13 @@ def rax_asg(module, cooldown=300, disk_config=None, files={}, flavor=None, if key_name != lc.get('key_name'): lc_args['key_name'] = key_name + if config_drive != lc.get('config_drive'): + lc_args['config_drive'] = config_drive + + if (user_data and + base64.b64encode(user_data) != lc.get('user_data')): + lc_args['user_data'] = user_data + if lc_args: # Work around for 
https://github.com/rackspace/pyrax/pull/389 if 'flavor' not in lc_args: @@ -284,9 +334,10 @@ def main(): argument_spec = rax_argument_spec() argument_spec.update( dict( + config_drive=dict(default=False, type='bool'), cooldown=dict(type='int', default=300), disk_config=dict(choices=['auto', 'manual']), - files=dict(type='list', default=[]), + files=dict(type='dict', default={}), flavor=dict(required=True), image=dict(required=True), key_name=dict(), @@ -298,6 +349,7 @@ def main(): networks=dict(type='list', default=['public', 'private']), server_name=dict(required=True), state=dict(default='present', choices=['present', 'absent']), + user_data=dict(no_log=True), ) ) @@ -309,6 +361,7 @@ def main(): if not HAS_PYRAX: module.fail_json(msg='pyrax is required for this module') + config_drive = module.params.get('config_drive') cooldown = module.params.get('cooldown') disk_config = module.params.get('disk_config') if disk_config: @@ -325,6 +378,7 @@ def main(): networks = module.params.get('networks') server_name = module.params.get('server_name') state = module.params.get('state') + user_data = module.params.get('user_data') if not 0 <= min_entities <= 1000 or not 0 <= max_entities <= 1000: module.fail_json(msg='min_entities and max_entities must be an ' @@ -340,7 +394,7 @@ def main(): key_name=key_name, loadbalancers=loadbalancers, min_entities=min_entities, max_entities=max_entities, name=name, networks=networks, server_name=server_name, - state=state) + state=state, config_drive=config_drive, user_data=user_data) # import module snippets diff --git a/cloud/rax_scaling_policy.py b/cloud/rackspace/rax_scaling_policy.py similarity index 100% rename from cloud/rax_scaling_policy.py rename to cloud/rackspace/rax_scaling_policy.py diff --git a/cloud/vmware/__init__.py b/cloud/vmware/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cloud/vsphere_guest.py b/cloud/vmware/vsphere_guest.py similarity index 91% rename from cloud/vsphere_guest.py rename to cloud/vmware/vsphere_guest.py index a91a8199dda..8ad7df41dea 100644 --- a/cloud/vsphere_guest.py +++ b/cloud/vmware/vsphere_guest.py @@ -38,7 +38,7 @@ options: description: - The virtual server name you wish to manage. required: true - user: + username: description: - Username to connect to vcenter as. required: true @@ -65,9 +65,20 @@ options: default: null state: description: - - Indicate desired state of the vm. + - Indicate desired state of the vm. default: present - choices: ['present', 'powered_on', 'absent', 'powered_off', 'restarted', 'reconfigured'] + choices: ['present', 'powered_off', 'absent', 'powered_on', 'restarted', 'reconfigured'] + from_template: + version_added: "1.9" + description: + - Specifies if the VM should be deployed from a template (cannot be ran with state) + default: no + choices: ['yes', 'no'] + template_src: + version_added: "1.9" + description: + - Name of the source template to deploy from + default: None vm_disk: description: - A key, value list of disks and their sizes and which datastore to keep it in. @@ -181,6 +192,18 @@ EXAMPLES = ''' datacenter: MyDatacenter hostname: esx001.mydomain.local +# Deploy a guest from a template +# No reconfiguration of the destination guest is done at this stage, a reconfigure would be needed to adjust memory/cpu etc.. 
+- vsphere_guest: + vcenter_hostname: vcenter.mydomain.local + username: myuser + password: mypass + guest: newvm001 + from_template: yes + template_src: centosTemplate + cluster: MainCluster + resource_pool: "/Resources" + # Task to gather facts from a vSphere cluster only if the system is a VMWare guest - vsphere_guest: @@ -192,12 +215,14 @@ EXAMPLES = ''' # Typical output of a vsphere_facts run on a guest +# If vmware tools is not installed, ipadresses with return None - hw_eth0: - addresstype: "assigned" label: "Network adapter 1" macaddress: "00:22:33:33:44:55" macaddress_dash: "00-22-33-33-44-55" + ipaddresses: ['192.0.2.100', '2001:DB8:56ff:feac:4d8a'] summary: "VM Network" hw_guest_full_name: "newvm001" hw_guest_id: "rhel6_64Guest" @@ -207,7 +232,7 @@ EXAMPLES = ''' hw_product_uuid: "ef50bac8-2845-40ff-81d9-675315501dac" # Remove a vm from vSphere -# The VM must be powered_off of you need to use force to force a shutdown +# The VM must be powered_off or you need to use force to force a shutdown - vsphere_guest: vcenter_hostname: vcenter.mydomain.local @@ -488,6 +513,49 @@ def vmdisk_id(vm, current_datastore_name): return id_list +def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, module, cluster_name): + vmTemplate = vsphere_client.get_vm_by_name(template_src) + vmTarget = None + + try: + cluster = [k for k, + v in vsphere_client.get_clusters().items() if v == cluster_name][0] + except IndexError, e: + vsphere_client.disconnect() + module.fail_json(msg="Cannot find Cluster named: %s" % + cluster_name) + + try: + rpmor = [k for k, v in vsphere_client.get_resource_pools( + from_mor=cluster).items() + if v == resource_pool][0] + except IndexError, e: + vsphere_client.disconnect() + module.fail_json(msg="Cannot find Resource Pool named: %s" % + resource_pool) + + try: + vmTarget = vsphere_client.get_vm_by_name(guest) + except Exception: + pass + if not vmTemplate.properties.config.template: + module.fail_json( + msg="Target %s is not a registered template" % template_src + ) + try: + if vmTarget: + changed = False + else: + vmTemplate.clone(guest, resourcepool=rpmor) + changed = True + vsphere_client.disconnect() + module.exit_json(changed=changed) + except Exception as e: + module.fail_json( + msg="Could not clone selected machine: %s" % e + ) + + def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool, cluster_name, guest, vm_extra_config, vm_hardware, vm_disk, vm_nic, state, force): spec = None changed = False @@ -618,7 +686,16 @@ def create_vm(vsphere_client, module, esxi, resource_pool, cluster_name, guest, hfmor = dcprops.hostFolder._obj # virtualmachineFolder managed object reference - vmfmor = dcprops.vmFolder._obj + if vm_extra_config.get('folder'): + if vm_extra_config['folder'] not in vsphere_client._get_managed_objects(MORTypes.Folder).values(): + vsphere_client.disconnect() + module.fail_json(msg="Cannot find folder named: %s" % vm_extra_config['folder']) + + for mor, name in vsphere_client._get_managed_objects(MORTypes.Folder).iteritems(): + if name == vm_extra_config['folder']: + vmfmor = mor + else: + vmfmor = dcprops.vmFolder._obj # networkFolder managed object reference nfmor = dcprops.networkFolder._obj @@ -936,6 +1013,11 @@ def gather_facts(vm): 'hw_processor_count': vm.properties.config.hardware.numCPU, 'hw_memtotal_mb': vm.properties.config.hardware.memoryMB, } + netInfo = vm.get_property('net') + netDict = {} + if netInfo: + for net in netInfo: + netDict[net['mac_address']] = net['ip_addresses'] ifidx = 0 for entry in 
vm.properties.config.hardware.device: @@ -948,6 +1030,7 @@ def gather_facts(vm): 'addresstype': entry.addressType, 'label': entry.deviceInfo.label, 'macaddress': entry.macAddress, + 'ipaddresses': netDict.get(entry.macAddress, None), 'macaddress_dash': entry.macAddress.replace(':', '-'), 'summary': entry.deviceInfo.summary, } @@ -1066,6 +1149,8 @@ def main(): ], default='present'), vmware_guest_facts=dict(required=False, choices=BOOLEANS), + from_template=dict(required=False, choices=BOOLEANS), + template_src=dict(required=False, type='str'), guest=dict(required=True, type='str'), vm_disk=dict(required=False, type='dict', default={}), vm_nic=dict(required=False, type='dict', default={}), @@ -1080,7 +1165,7 @@ ), supports_check_mode=False, - mutually_exclusive=[['state', 'vmware_guest_facts']], + mutually_exclusive=[['state', 'vmware_guest_facts'],['state', 'from_template']], required_together=[ ['state', 'force'], [ 'state', 'vm_hardware', 'esxi' ], - ['resource_pool', 'cluster'] + ['resource_pool', 'cluster'], + ['from_template', 'resource_pool', 'template_src'] ], ) @@ -1112,6 +1198,8 @@ def main(): esxi = module.params['esxi'] resource_pool = module.params['resource_pool'] cluster = module.params['cluster'] + template_src = module.params['template_src'] + from_template = module.params['from_template'] # CONNECT TO THE SERVER viserver = VIServer() @@ -1135,7 +1223,6 @@ def main(): except Exception, e: module.fail_json( msg="Fact gather failed with exception %s" % e) - # Power Changes elif state in ['powered_on', 'powered_off', 'restarted']: state_result = power_state(vm, state, force) @@ -1183,6 +1270,17 @@ def main(): module.fail_json( msg="No such VM %s. Fact gathering requires an existing vm" % guest) + + elif from_template: + deploy_template( + vsphere_client=viserver, + esxi=esxi, + resource_pool=resource_pool, + guest=guest, + template_src=template_src, + module=module, + cluster_name=cluster + ) if state in ['restarted', 'reconfigured']: module.fail_json( msg="No such VM %s. States [" diff --git a/commands/command.py b/commands/command.py index c1fabd4f9b4..2b79b327d71 100644 --- a/commands/command.py +++ b/commands/command.py @@ -18,6 +18,7 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. +import copy import sys import datetime import traceback @@ -99,12 +100,22 @@ EXAMPLES = ''' creates: /path/to/database ''' +# Dict of options and their defaults +OPTIONS = {'chdir': None, + 'creates': None, + 'executable': None, + 'NO_LOG': None, + 'removes': None, + 'warn': True, + } + # This is a pretty complex regex, which functions as follows: # # 1. (^|\s) # ^ look for a space or the beginning of the line -# 2. (creates|removes|chdir|executable|NO_LOG)= -# ^ look for a valid param, followed by an '=' +# 2. ({options_list})= +# ^ expanded to (chdir|creates|executable...)= +# look for a valid param, followed by an '=' # 3. (?P<quote>[\'"])? # ^ look for an optional quote character, which can either be # a single or double quote character, and store it for later @@ -114,8 +125,12 @@ EXAMPLES = ''' # ^ a non-escaped space or a non-escaped quote of the same kind # that was matched in the first 'quote' is found, or the end of # the line is reached - -PARAM_REGEX = re.compile(r'(^|\s)(creates|removes|chdir|executable|NO_LOG|warn)=(?P<quote>[\'"])?(.*?)(?(quote)(?<!\\)(?P=quote))((?<!\\)(?=\s)|$)') +PARAM_REGEX = re.compile(r'(^|\s)({options_list})=(?P<quote>[\'"])?(.*?)(?(quote)(?<!\\)(?P=quote))((?<!\\)(?=\s)|$)'.format(options_list="|".join(OPTIONS)))
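A quick way to see what this pattern matches is to run it against a sample command line; the sample string and expected values below are illustrative only:

import re

OPTIONS = {'chdir': None, 'creates': None, 'executable': None,
           'NO_LOG': None, 'removes': None, 'warn': True}
# Same construction as above: the option names are formatted into the
# alternation so the regex stays in sync with OPTIONS.
PARAM_REGEX = re.compile(
    r'(^|\s)({options_list})=(?P<quote>[\'"])?(.*?)(?(quote)(?<!\\)(?P=quote))((?<!\\)(?=\s)|$)'.format(
        options_list="|".join(OPTIONS)))

sample = 'chdir=/tmp creates="/tmp/some file" echo hello'
# group(2) is the option name, group(4) its (possibly quoted) value.
found = dict((m.group(2), m.group(4)) for m in PARAM_REGEX.finditer(sample))
assert found == {'chdir': '/tmp', 'creates': '/tmp/some file'}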
- if len(v) > 1 and (v.startswith('"') and v.endswith('"') or v.startswith("'") and v.endswith("'")): - v = v[1:-1] - if k in ('creates', 'removes', 'chdir', 'executable', 'NO_LOG'): + v = unquote(v.strip()) + if k in OPTIONS.keys(): if k == "chdir": v = os.path.abspath(os.path.expanduser(v)) if not (os.path.exists(v) and os.path.isdir(v)): diff --git a/database/mysql/__init__.py b/database/mysql/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/database/mysql_db.py b/database/mysql/mysql_db.py similarity index 96% rename from database/mysql_db.py rename to database/mysql/mysql_db.py index 38dee608ba5..3983c66639a 100644 --- a/database/mysql_db.py +++ b/database/mysql/mysql_db.py @@ -118,7 +118,7 @@ def db_exists(cursor, db): return bool(res) def db_delete(cursor, db): - query = "DROP DATABASE `%s`" % db + query = "DROP DATABASE %s" % mysql_quote_identifier(db, 'database') cursor.execute(query) return True @@ -190,12 +190,14 @@ def db_import(module, host, user, password, db_name, target, port, socket=None): return rc, stdout, stderr def db_create(cursor, db, encoding, collation): + query_params = dict(enc=encoding, collate=collation) + query = ['CREATE DATABASE %s' % mysql_quote_identifier(db, 'database')] if encoding: - encoding = " CHARACTER SET %s" % encoding + query.append("CHARACTER SET %(enc)s") if collation: - collation = " COLLATE %s" % collation - query = "CREATE DATABASE `%s`%s%s" % (db, encoding, collation) - res = cursor.execute(query) + query.append("COLLATE %(collate)s") + query = ' '.join(query) + res = cursor.execute(query, query_params) return True def strip_quotes(s): @@ -360,4 +362,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() +from ansible.module_utils.database import * +if __name__ == '__main__': + main() diff --git a/database/mysql_user.py b/database/mysql/mysql_user.py similarity index 82% rename from database/mysql_user.py rename to database/mysql/mysql_user.py index aaec05f99f5..3590fb8e640 100644 --- a/database/mysql_user.py +++ b/database/mysql/mysql_user.py @@ -117,6 +117,9 @@ EXAMPLES = """ # Creates database user 'bob' and password '12345' with all database privileges and 'WITH GRANT OPTION' - mysql_user: name=bob password=12345 priv=*.*:ALL,GRANT state=present +# Modify user Bob to require SSL connections. Note that REQUIRESSL is a special privilege that should only apply to *.* by itself. +- mysql_user: name=bob append_privs=true priv=*.*:REQUIRESSL state=present + # Ensure no user named 'sally' exists, also passing in the auth credentials. - mysql_user: login_user=root login_password=123456 name=sally state=absent @@ -151,6 +154,19 @@ except ImportError: else: mysqldb_found = True +VALID_PRIVS = frozenset(('CREATE', 'DROP', 'GRANT', 'GRANT OPTION', + 'LOCK TABLES', 'REFERENCES', 'EVENT', 'ALTER', + 'DELETE', 'INDEX', 'INSERT', 'SELECT', 'UPDATE', + 'CREATE TEMPORARY TABLES', 'TRIGGER', 'CREATE VIEW', + 'SHOW VIEW', 'ALTER ROUTINE', 'CREATE ROUTINE', + 'EXECUTE', 'FILE', 'CREATE TABLESPACE', 'CREATE USER', + 'PROCESS', 'PROXY', 'RELOAD', 'REPLICATION CLIENT', + 'REPLICATION SLAVE', 'SHOW DATABASES', 'SHUTDOWN', + 'SUPER', 'ALL', 'ALL PRIVILEGES', 'USAGE', 'REQUIRESSL')) + +class InvalidPrivsError(Exception): + pass + # =========================================== # MySQL module specific support methods. # @@ -171,7 +187,7 @@ def user_mod(cursor, user, host, password, new_priv, append_privs): changed = False grant_option = False - # Handle passwords.
+ # Handle passwords if password is not None: cursor.execute("SELECT password FROM user WHERE user = %s AND host = %s", (user,host)) current_pass_hash = cursor.fetchone() @@ -181,7 +197,7 @@ def user_mod(cursor, user, host, password, new_priv, append_privs): cursor.execute("SET PASSWORD FOR %s@%s = PASSWORD(%s)", (user,host,password)) changed = True - # Handle privileges. + # Handle privileges if new_priv is not None: curr_priv = privileges_get(cursor, user,host) @@ -217,7 +233,7 @@ def user_mod(cursor, user, host, password, new_priv, append_privs): return changed def user_delete(cursor, user, host): - cursor.execute("DROP USER %s@%s", (user,host)) + cursor.execute("DROP USER %s@%s", (user, host)) return True def privileges_get(cursor, user,host): @@ -231,7 +247,7 @@ def privileges_get(cursor, user,host): The dictionary format is the same as that returned by privileges_unpack() below. """ output = {} - cursor.execute("SHOW GRANTS FOR %s@%s", (user,host)) + cursor.execute("SHOW GRANTS FOR %s@%s", (user, host)) grants = cursor.fetchall() def pick(x): @@ -243,11 +259,13 @@ def privileges_get(cursor, user,host): for grant in grants: res = re.match("GRANT (.+) ON (.+) TO '.+'@'.+'( IDENTIFIED BY PASSWORD '.+')? ?(.*)", grant[0]) if res is None: - module.fail_json(msg="unable to parse the MySQL grant string") + raise InvalidPrivsError('unable to parse the MySQL grant string: %s' % grant[0]) privileges = res.group(1).split(", ") privileges = [ pick(x) for x in privileges] if "WITH GRANT OPTION" in res.group(4): privileges.append('GRANT') + if "REQUIRE SSL" in res.group(4): + privileges.append('REQUIRESSL') db = res.group(2) output[db] = privileges return output @@ -264,8 +282,8 @@ def privileges_unpack(priv): not specified in the string, as MySQL will always provide this by default. """ output = {} - for item in priv.split('/'): - pieces = item.split(':') + for item in priv.strip().split('/'): + pieces = item.strip().split(':') if '.' 
in pieces[0]: pieces[0] = pieces[0].split('.') for idx, piece in enumerate(pieces): @@ -274,27 +292,46 @@ def privileges_unpack(priv): pieces[0] = '.'.join(pieces[0]) output[pieces[0]] = pieces[1].upper().split(',') + new_privs = frozenset(output[pieces[0]]) + if not new_privs.issubset(VALID_PRIVS): + raise InvalidPrivsError('Invalid privileges specified: %s' % new_privs.difference(VALID_PRIVS)) if '*.*' not in output: output['*.*'] = ['USAGE'] + # if we are only specifying something like REQUIRESSL in *.* we still need + # to add USAGE as a privilege to avoid syntax errors + if priv.find('REQUIRESSL') != -1 and 'USAGE' not in output['*.*']: + output['*.*'].append('USAGE') + return output def privileges_revoke(cursor, user,host,db_table,grant_option): + # Escape '%' since mysql db.execute() uses a format string + db_table = db_table.replace('%', '%%') if grant_option: - query = "REVOKE GRANT OPTION ON %s FROM '%s'@'%s'" % (db_table,user,host) - cursor.execute(query) - query = "REVOKE ALL PRIVILEGES ON %s FROM '%s'@'%s'" % (db_table,user,host) - cursor.execute(query) + query = ["REVOKE GRANT OPTION ON %s" % mysql_quote_identifier(db_table, 'table')] + query.append("FROM %s@%s") + query = ' '.join(query) + cursor.execute(query, (user, host)) + query = ["REVOKE ALL PRIVILEGES ON %s" % mysql_quote_identifier(db_table, 'table')] + query.append("FROM %s@%s") + query = ' '.join(query) + cursor.execute(query, (user, host)) def privileges_grant(cursor, user,host,db_table,priv): - - priv_string = ",".join(filter(lambda x: x != 'GRANT', priv)) - query = "GRANT %s ON %s TO '%s'@'%s'" % (priv_string,db_table,user,host) + # Escape '%' since mysql db.execute uses a format string and the + # specification of db and table often use a % (SQL wildcard) + db_table = db_table.replace('%', '%%') + priv_string = ",".join(filter(lambda x: x not in [ 'GRANT', 'REQUIRESSL' ], priv)) + query = ["GRANT %s ON %s" % (priv_string, mysql_quote_identifier(db_table, 'table'))] + query.append("TO %s@%s") if 'GRANT' in priv: - query = query + " WITH GRANT OPTION" - cursor.execute(query) - + query.append("WITH GRANT OPTION") + if 'REQUIRESSL' in priv: + query.append("REQUIRE SSL") + query = ' '.join(query) + cursor.execute(query, (user, host)) def strip_quotes(s): """ Remove surrounding single or double quotes @@ -425,8 +462,8 @@ def main(): if priv is not None: try: priv = privileges_unpack(priv) - except: - module.fail_json(msg="invalid privileges string") + except Exception, e: + module.fail_json(msg="invalid privileges string: %s" % str(e)) # Either the caller passes both a username and password with which to connect to # mysql, or they pass neither and allow this module to read the credentials from @@ -459,11 +496,17 @@ def main(): if state == "present": if user_exists(cursor, user, host): - changed = user_mod(cursor, user, host, password, priv, append_privs) + try: + changed = user_mod(cursor, user, host, password, priv, append_privs) + except (SQLParseError, InvalidPrivsError, MySQLdb.Error), e: + module.fail_json(msg=str(e)) else: if password is None: module.fail_json(msg="password parameter required when adding a user") - changed = user_add(cursor, user, host, password, priv) + try: + changed = user_add(cursor, user, host, password, priv) + except (SQLParseError, InvalidPrivsError, MySQLdb.Error), e: + module.fail_json(msg=str(e)) elif state == "absent": if user_exists(cursor, user, host): changed = user_delete(cursor, user, host) @@ -473,4 +516,6 @@ def main(): # import module snippets from 
ansible.module_utils.basic import * -main() +from ansible.module_utils.database import * +if __name__ == '__main__': + main() diff --git a/database/mysql_variables.py b/database/mysql/mysql_variables.py similarity index 94% rename from database/mysql_variables.py rename to database/mysql/mysql_variables.py index 7353fdd485d..199c5eb6eca 100644 --- a/database/mysql_variables.py +++ b/database/mysql/mysql_variables.py @@ -103,7 +103,7 @@ def typedvalue(value): def getvariable(cursor, mysqlvar): - cursor.execute("SHOW VARIABLES LIKE '" + mysqlvar + "'") + cursor.execute("SHOW VARIABLES LIKE %s", (mysqlvar,)) mysqlvar_val = cursor.fetchall() return mysqlvar_val @@ -116,8 +116,11 @@ def setvariable(cursor, mysqlvar, value): should be passed as numeric literals. """ + query = ["SET GLOBAL %s" % mysql_quote_identifier(mysqlvar, 'vars') ] + query.append(" = %s") + query = ' '.join(query) try: - cursor.execute("SET GLOBAL " + mysqlvar + " = %s", (value,)) + cursor.execute(query, (value,)) cursor.fetchall() result = True except Exception, e: @@ -242,7 +245,10 @@ def main(): value_actual = typedvalue(mysqlvar_val[0][1]) if value_wanted == value_actual: module.exit_json(msg="Variable already set to requested value", changed=False) - result = setvariable(cursor, mysqlvar, value_wanted) + try: + result = setvariable(cursor, mysqlvar, value_wanted) + except SQLParseError, e: + result = str(e) if result is True: module.exit_json(msg="Variable change succeeded prev_value=%s" % value_actual, changed=True) else: @@ -250,4 +256,5 @@ def main(): # import module snippets from ansible.module_utils.basic import * +from ansible.module_utils.database import * main() diff --git a/database/postgresql/__init__.py b/database/postgresql/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/database/postgresql_db.py b/database/postgresql/postgresql_db.py similarity index 83% rename from database/postgresql_db.py rename to database/postgresql/postgresql_db.py index 605be621601..941644d6fb1 100644 --- a/database/postgresql_db.py +++ b/database/postgresql/postgresql_db.py @@ -44,6 +44,11 @@ options: - Host running the database required: false default: localhost + login_unix_socket: + description: + - Path to a Unix domain socket for local connections + required: false + default: null owner: description: - Name of the role to set as owner of the database @@ -124,7 +129,9 @@ class NotSupportedError(Exception): # def set_owner(cursor, db, owner): - query = "ALTER DATABASE \"%s\" OWNER TO \"%s\"" % (db, owner) + query = "ALTER DATABASE %s OWNER TO %s" % ( + pg_quote_identifier(db, 'database'), + pg_quote_identifier(owner, 'role')) cursor.execute(query) return True @@ -141,7 +148,7 @@ def get_db_info(cursor, db): FROM pg_database JOIN pg_roles ON pg_roles.oid = pg_database.datdba WHERE datname = %(db)s """ - cursor.execute(query, {'db':db}) + cursor.execute(query, {'db': db}) return cursor.fetchone() def db_exists(cursor, db): @@ -151,32 +158,32 @@ def db_exists(cursor, db): def db_delete(cursor, db): if db_exists(cursor, db): - query = "DROP DATABASE \"%s\"" % db + query = "DROP DATABASE %s" % pg_quote_identifier(db, 'database') cursor.execute(query) return True else: return False def db_create(cursor, db, owner, template, encoding, lc_collate, lc_ctype): + params = dict(enc=encoding, collate=lc_collate, ctype=lc_ctype) if not db_exists(cursor, db): + query_fragments = ['CREATE DATABASE %s' % pg_quote_identifier(db, 'database')] if owner: - owner = " OWNER \"%s\"" % owner + query_fragments.append('OWNER 
%s' % pg_quote_identifier(owner, 'role')) if template: - template = " TEMPLATE \"%s\"" % template + query_fragments.append('TEMPLATE %s' % pg_quote_identifier(template, 'database')) if encoding: - encoding = " ENCODING '%s'" % encoding + query_fragments.append('ENCODING %(enc)s') if lc_collate: - lc_collate = " LC_COLLATE '%s'" % lc_collate + query_fragments.append('LC_COLLATE %(collate)s') if lc_ctype: - lc_ctype = " LC_CTYPE '%s'" % lc_ctype - query = 'CREATE DATABASE "%s"%s%s%s%s%s' % (db, owner, - template, encoding, - lc_collate, lc_ctype) - cursor.execute(query) + query_fragments.append('LC_CTYPE %(ctype)s') + query = ' '.join(query_fragments) + cursor.execute(query, params) return True else: db_info = get_db_info(cursor, db) - if (encoding and + if (encoding and get_encoding_id(cursor, encoding) != db_info['encoding_id']): raise NotSupportedError( 'Changing database encoding is not supported. ' @@ -202,7 +209,7 @@ def db_matches(cursor, db, owner, template, encoding, lc_collate, lc_ctype): return False else: db_info = get_db_info(cursor, db) - if (encoding and + if (encoding and get_encoding_id(cursor, encoding) != db_info['encoding_id']): return False elif lc_collate and lc_collate != db_info['lc_collate']: @@ -224,6 +231,7 @@ def main(): login_user=dict(default="postgres"), login_password=dict(default=""), login_host=dict(default=""), + login_unix_socket=dict(default=""), port=dict(default="5432"), db=dict(required=True, aliases=['name']), owner=dict(default=""), @@ -249,7 +257,7 @@ def main(): state = module.params["state"] changed = False - # To use defaults values, keyword arguments must be absent, so + # To use defaults values, keyword arguments must be absent, so # check which values are empty and don't include in the **kw # dictionary params_map = { @@ -258,8 +266,14 @@ def main(): "login_password":"password", "port":"port" } - kw = dict( (params_map[k], v) for (k, v) in module.params.iteritems() + kw = dict( (params_map[k], v) for (k, v) in module.params.iteritems() if k in params_map and v != '' ) + + # If a login_unix_socket is specified, incorporate it here. 
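The db_create rewrite above trades string interpolation for a split worth spelling out: ENCODING, LC_COLLATE and LC_CTYPE are plain values, so psycopg2 can bind them through the %(enc)s-style placeholders, while the database, owner and template names are identifiers, which no DB-API driver can bind; those must be quoted before interpolation. pg_quote_identifier comes from ansible.module_utils.database and its body is not part of this diff, so the following is only a sketch of the kind of quoting involved (hypothetical helper name, simplified rules):

    # Simplified stand-in for pg_quote_identifier; the real helper also
    # validates how many dot-separated components each object type allows.
    def sketch_quote_pg_identifier(name):
        # Double embedded quotes, then wrap each dotted part in double quotes.
        return '.'.join('"%s"' % part.replace('"', '""') for part in name.split('.'))

    query = 'CREATE DATABASE %s ENCODING %%(enc)s' % sketch_quote_pg_identifier('my"db')
    # cursor.execute(query, {'enc': 'UTF-8'})  -- the value is left to the driver

Interpolating the quoted identifier first and letting execute() bind the remaining parameters keeps both halves injection-safe.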
+ is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost" + if is_localhost and module.params["login_unix_socket"] != "": + kw["host"] = module.params["login_unix_socket"] + try: db_connection = psycopg2.connect(database="template1", **kw) # Enable autocommit so we can create databases @@ -284,13 +298,22 @@ def main(): module.exit_json(changed=changed,db=db) if state == "absent": - changed = db_delete(cursor, db) + try: + changed = db_delete(cursor, db) + except SQLParseError, e: + module.fail_json(msg=str(e)) elif state == "present": - changed = db_create(cursor, db, owner, template, encoding, + try: + changed = db_create(cursor, db, owner, template, encoding, lc_collate, lc_ctype) + except SQLParseError, e: + module.fail_json(msg=str(e)) except NotSupportedError, e: module.fail_json(msg=str(e)) + except SystemExit: + # Avoid catching this on Python 2.4 + raise except Exception, e: module.fail_json(msg="Database query failed: %s" % e) @@ -298,4 +321,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() +from ansible.module_utils.database import * +if __name__ == '__main__': + main() diff --git a/database/postgresql_privs.py b/database/postgresql/postgresql_privs.py similarity index 92% rename from database/postgresql_privs.py rename to database/postgresql/postgresql_privs.py index de5fa94fa48..9b9d94923bc 100644 --- a/database/postgresql_privs.py +++ b/database/postgresql/postgresql_privs.py @@ -29,7 +29,7 @@ description: options: database: description: - - Name of database to connect to. + - Name of database to connect to. - 'Alias: I(db)' required: yes state: @@ -53,7 +53,7 @@ options: schema, language, tablespace, group] objs: description: - - Comma separated list of database objects to set privileges on. + - Comma separated list of database objects to set privileges on. - If I(type) is C(table) or C(sequence), the special value C(ALL_IN_SCHEMA) can be provided instead to specify all database objects of type I(type) in the schema specified via I(schema). (This @@ -99,6 +99,12 @@ options: - Database port to connect to. required: no default: 5432 + unix_socket: + description: + - Path to a Unix domain socket for local connections. + - 'Alias: I(login_unix_socket)' + required: false + default: null login: description: - The username to authenticate with. @@ -135,7 +141,7 @@ author: Bernhard Weitzhofer EXAMPLES = """ # On database "library": -# GRANT SELECT, INSERT, UPDATE ON TABLE public.books, public.authors +# GRANT SELECT, INSERT, UPDATE ON TABLE public.books, public.authors # TO librarian, reader WITH GRANT OPTION - postgresql_privs: > database=library @@ -155,8 +161,8 @@ EXAMPLES = """ roles=librarian,reader grant_option=yes -# REVOKE GRANT OPTION FOR INSERT ON TABLE books FROM reader -# Note that role "reader" will be *granted* INSERT privilege itself if this +# REVOKE GRANT OPTION FOR INSERT ON TABLE books FROM reader +# Note that role "reader" will be *granted* INSERT privilege itself if this # isn't already the case (since state=present). 
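The comment above describes a subtle bit of present-state behaviour: with state=present and grant_option=no, only the grant option is stripped; the privilege itself is kept, and granted first if missing. Inside the Connection class this comes down to choosing between two REVOKE forms based on the tri-state grant_option flag; paraphrased here under a hypothetical function name, not as a verbatim excerpt:

    def sketch_revoke_query(set_what, for_whom, grant_option):
        if grant_option is False:
            # strip only the grant/admin option, keep the privilege itself
            return 'REVOKE GRANT OPTION FOR %s FROM %s' % (set_what, for_whom)
        return 'REVOKE %s FROM %s' % (set_what, for_whom)

    sketch_revoke_query('INSERT ON TABLE books', 'reader', False)
    # -> 'REVOKE GRANT OPTION FOR INSERT ON TABLE books FROM reader'

The task below encodes exactly this case.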
- postgresql_privs: > db=library state=present privs=INSERT objs=books roles=reader @@ -214,7 +220,7 @@ EXAMPLES = """ role=librarian # GRANT ALL PRIVILEGES ON DATABASE library TO librarian -# If objs is omitted for type "database", it defaults to the database +# If objs is omitted for type "database", it defaults to the database # to which the connection is established - postgresql_privs: > db=library @@ -230,6 +236,9 @@ except ImportError: psycopg2 = None +VALID_PRIVS = frozenset(('SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE', + 'REFERENCES', 'TRIGGER', 'CREATE', 'CONNECT', + 'TEMPORARY', 'TEMP', 'EXECUTE', 'USAGE', 'ALL')) class Error(Exception): pass @@ -264,6 +273,12 @@ class Connection(object): } kw = dict( (params_map[k], getattr(params, k)) for k in params_map if getattr(params, k) != '' ) + + # If a unix_socket is specified, incorporate it here. + is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost" + if is_localhost and params.unix_socket != "": + kw["host"] = params.unix_socket + self.connection = psycopg2.connect(**kw) self.cursor = self.connection.cursor() @@ -386,9 +401,9 @@ class Connection(object): def get_group_memberships(self, groups): query = """SELECT roleid, grantor, member, admin_option - FROM pg_catalog.pg_auth_members am + FROM pg_catalog.pg_auth_members am JOIN pg_catalog.pg_roles r ON r.oid = am.roleid - WHERE r.rolname = ANY(%s) + WHERE r.rolname = ANY(%s) ORDER BY roleid, grantor, member""" self.cursor.execute(query, (groups,)) return self.cursor.fetchall() @@ -402,14 +417,14 @@ class Connection(object): :param obj_type: Type of database object to grant/revoke privileges for. - :param privs: Either a list of privileges to grant/revoke + :param privs: Either a list of privileges to grant/revoke or None if type is "group". :param objs: List of database objects to grant/revoke privileges for. :param roles: Either a list of role names or "PUBLIC" for the implicitly defined "PUBLIC" group :param state: "present" to grant privileges, "absent" to revoke. - :param grant_option: Only for state "present": If True, set + :param grant_option: Only for state "present": If True, set grant/admin option. If False, revoke it. If None, don't change grant option. :param schema_qualifier: Some object types ("TABLE", "SEQUENCE", @@ -454,19 +469,21 @@ class Connection(object): else: obj_ids = ['"%s"' % o for o in objs] - # set_what: SQL-fragment specifying what to set for the target roless: - # Either group membership or privileges on objects of a certain type.
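The ','.join(privs) in the set_what assembly rebuilt just below is safe only because privilege keywords, like identifiers, end up interpolated into the GRANT/REVOKE string and cannot be bound as parameters; VALID_PRIVS above exists to whitelist them before they get anywhere near SQL. A minimal sketch of that check (abridged set, hypothetical function name):

    SKETCH_VALID_PRIVS = frozenset(('SELECT', 'INSERT', 'UPDATE', 'ALL'))  # abridged

    def sketch_check_privs(raw):
        privs = frozenset(p.strip().upper() for p in raw.split(','))
        bad = privs.difference(SKETCH_VALID_PRIVS)
        if bad:
            raise ValueError('Invalid privileges specified: %s' % ', '.join(sorted(bad)))
        return privs

    sketch_check_privs('select,insert')   # frozenset(['SELECT', 'INSERT'])
    # sketch_check_privs('select,drop')   # ValueError: ... DROP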
+ # set_what: SQL-fragment specifying what to set for the target roles: + # Either group membership or privileges on objects of a certain type if obj_type == 'group': - set_what = ','.join(obj_ids) + set_what = ','.join(pg_quote_identifier(i, 'role') for i in obj_ids) else: - set_what = '%s ON %s %s' % (','.join(privs), obj_type, - ','.join(obj_ids)) + # Note: obj_type has been checked against a set of string literals + # and privs was escaped when it was parsed + set_what = '%s ON %s %s' % (','.join(privs), obj_type, + ','.join(pg_quote_identifier(i, 'table') for i in obj_ids)) # for_whom: SQL-fragment specifying for whom to set the above if roles == 'PUBLIC': for_whom = 'PUBLIC' else: - for_whom = ','.join(['"%s"' % r for r in roles]) + for_whom = ','.join(pg_quote_identifier(r, 'role') for r in roles) status_before = get_status(objs) if state == 'present': @@ -476,7 +493,7 @@ class Connection(object): else: query = 'GRANT %s TO %s WITH GRANT OPTION' else: - query = 'GRANT %s TO %s' + query = 'GRANT %s TO %s' self.cursor.execute(query % (set_what, for_whom)) # Only revoke GRANT/ADMIN OPTION if grant_option actually is False. @@ -487,7 +504,7 @@ class Connection(object): query = 'REVOKE GRANT OPTION FOR %s FROM %s' self.cursor.execute(query % (set_what, for_whom)) else: - query = 'REVOKE %s FROM %s' + query = 'REVOKE %s FROM %s' self.cursor.execute(query % (set_what, for_whom)) status_after = get_status(objs) return status_before != status_after @@ -511,10 +528,11 @@ def main(): objs=dict(required=False, aliases=['obj']), schema=dict(required=False), roles=dict(required=True, aliases=['role']), - grant_option=dict(required=False, type='bool', + grant_option=dict(required=False, type='bool', aliases=['admin_option']), host=dict(default='', aliases=['login_host']), port=dict(type='int', default=5432), + unix_socket=dict(default='', aliases=['login_unix_socket']), login=dict(default='postgres', aliases=['login_user']), password=dict(default='', aliases=['login_password']) ), @@ -558,7 +576,9 @@ def main(): try: # privs if p.privs: - privs = p.privs.split(',') + privs = frozenset(pr.upper() for pr in p.privs.split(',')) + if not privs.issubset(VALID_PRIVS): + module.fail_json(msg='Invalid privileges specified: %s' % privs.difference(VALID_PRIVS)) else: privs = None @@ -610,4 +630,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() +from ansible.module_utils.database import * +if __name__ == '__main__': + main() diff --git a/database/postgresql_user.py b/database/postgresql/postgresql_user.py similarity index 75% rename from database/postgresql_user.py rename to database/postgresql/postgresql_user.py index 8af8c45d0c5..020b3740a63 100644 --- a/database/postgresql_user.py +++ b/database/postgresql/postgresql_user.py @@ -78,6 +78,11 @@ options: - Host running PostgreSQL. 
required: false default: localhost + login_unix_socket: + description: + - Path to a Unix domain socket for local connections + required: false + default: null priv: description: - "PostgreSQL privileges string in the format: C(table:priv1,priv2)" @@ -145,6 +150,7 @@ INSERT,UPDATE/table:SELECT/anothertable:ALL ''' import re +import itertools try: import psycopg2 @@ -153,6 +159,19 @@ except ImportError: else: postgresqldb_found = True +_flags = ('SUPERUSER', 'CREATEROLE', 'CREATEUSER', 'CREATEDB', 'INHERIT', 'LOGIN', 'REPLICATION') +VALID_FLAGS = frozenset(itertools.chain(_flags, ('NO%s' % f for f in _flags))) + +VALID_PRIVS = dict(table=frozenset(('SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE', 'REFERENCES', 'TRIGGER', 'ALL', 'USAGE')), + database=frozenset(('CREATE', 'CONNECT', 'TEMPORARY', 'TEMP', 'ALL', 'USAGE')), + ) + +class InvalidFlagsError(Exception): + pass + +class InvalidPrivsError(Exception): + pass + # =========================================== # PostgreSQL module specific support methods. # @@ -167,17 +186,18 @@ def user_exists(cursor, user): return cursor.rowcount > 0 -def user_add(cursor, user, password, role_attr_flags, encrypted, expires): +def user_add(cursor, user, password, role_attr_flags, encrypted, expires): """Create a new database user (role).""" - query_password_data = dict() - query = 'CREATE USER "%(user)s"' % { "user": user} + # Note: role_attr_flags escaped by parse_role_attrs and encrypted is a literal + query_password_data = dict(password=password, expires=expires) + query = ['CREATE USER %(user)s' % { "user": pg_quote_identifier(user, 'role')}] if password is not None: - query = query + " WITH %(crypt)s" % { "crypt": encrypted } - query = query + " PASSWORD %(password)s" - query_password_data.update(password=password) + query.append("WITH %(crypt)s" % { "crypt": encrypted }) + query.append("PASSWORD %(password)s") if expires is not None: - query = query + " VALID UNTIL '%(expires)s'" % { "expires": expires } - query = query + " " + role_attr_flags + query.append("VALID UNTIL %(expires)s") + query.append(role_attr_flags) + query = ' '.join(query) cursor.execute(query, query_password_data) return True @@ -185,6 +205,7 @@ def user_alter(cursor, module, user, password, role_attr_flags, encrypted, expir """Change user password and/or attributes. Return True if changed, False otherwise.""" changed = False + # Note: role_attr_flags escaped by parse_role_attrs and encrypted is a literal if user == 'PUBLIC': if password is not None: module.fail_json(msg="cannot change the password for PUBLIC user") @@ -196,25 +217,24 @@ def user_alter(cursor, module, user, password, role_attr_flags, encrypted, expir # Handle passwords. if password is not None or role_attr_flags is not None: # Select password and all flag-like columns in order to verify changes. - query_password_data = dict() + query_password_data = dict(password=password, expires=expires) select = "SELECT * FROM pg_authid where rolname=%(user)s" cursor.execute(select, {"user": user}) # Grab current role attributes. 
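Note the shape of the fix in user_add above, which user_alter below repeats: the SQL is assembled as a list of vetted fragments, the role name is pre-quoted with pg_quote_identifier, the ENCRYPTED keyword comes from a module-chosen literal, and everything user-supplied travels in query_password_data for the driver to bind. The removed lines just below even show an 'exipres' key typo that made the old VALID UNTIL interpolation a KeyError in waiting. A condensed sketch of the pattern, with an already-quoted example identifier:

    query = ['CREATE USER "alice"']           # identifier quoted up front
    query.append('WITH ENCRYPTED')            # literal picked by the module
    query.append('PASSWORD %(password)s')     # bound by psycopg2
    query.append('VALID UNTIL %(expires)s')   # bound by psycopg2
    sql = ' '.join(query)
    # cursor.execute(sql, dict(password='s3cret', expires='2017-01-01'))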
current_role_attrs = cursor.fetchone() - alter = 'ALTER USER "%(user)s"' % {"user": user} + alter = ['ALTER USER %(user)s' % {"user": pg_quote_identifier(user, 'role')}] if password is not None: - query_password_data.update(password=password) - alter = alter + " WITH %(crypt)s" % {"crypt": encrypted} - alter = alter + " PASSWORD %(password)s" - alter = alter + " %(flags)s" % {'flags': role_attr_flags} + alter.append("WITH %(crypt)s" % {"crypt": encrypted}) + alter.append("PASSWORD %(password)s") + alter.append(role_attr_flags) elif role_attr_flags: - alter = alter + ' WITH ' + role_attr_flags + alter.append('WITH %s' % role_attr_flags) if expires is not None: - alter = alter + " VALID UNTIL '%(expires)s'" % { "exipres": expires } + alter.append("VALID UNTIL %(expires)s") try: - cursor.execute(alter, query_password_data) + cursor.execute(' '.join(alter), query_password_data) except psycopg2.InternalError, e: if e.pgcode == '25006': # Handle errors due to read-only transactions indicated by pgcode 25006 @@ -240,7 +260,7 @@ def user_delete(cursor, user): """Try to remove a user. Returns True if successful otherwise False""" cursor.execute("SAVEPOINT ansible_pgsql_user_delete") try: - cursor.execute("DROP USER \"%s\"" % user) + cursor.execute("DROP USER %s" % pg_quote_identifier(user, 'role')) except: cursor.execute("ROLLBACK TO SAVEPOINT ansible_pgsql_user_delete") cursor.execute("RELEASE SAVEPOINT ansible_pgsql_user_delete") @@ -264,36 +284,20 @@ def get_table_privileges(cursor, user, table): cursor.execute(query, (user, table, schema)) return set([x[0] for x in cursor.fetchall()]) - -def quote_pg_identifier(identifier): - """ - quote postgresql identifiers involving zero or more namespaces - """ - - if '"' in identifier: - # the user has supplied their own quoting. we have to hope they're - # doing it right. 
Maybe they have an unfortunately named table - # containing a period in the name, such as: "public"."users.2013" - return identifier - - tokens = identifier.strip().split(".") - quoted_tokens = [] - for token in tokens: - quoted_tokens.append('"%s"' % (token, )) - return ".".join(quoted_tokens) - def grant_table_privilege(cursor, user, table, priv): + # Note: priv escaped by parse_privs prev_priv = get_table_privileges(cursor, user, table) query = 'GRANT %s ON TABLE %s TO %s' % ( - priv, quote_pg_identifier(table), quote_pg_identifier(user), ) + priv, pg_quote_identifier(table, 'table'), pg_quote_identifier(user, 'role') ) cursor.execute(query) curr_priv = get_table_privileges(cursor, user, table) return len(curr_priv) > len(prev_priv) def revoke_table_privilege(cursor, user, table, priv): + # Note: priv escaped by parse_privs prev_priv = get_table_privileges(cursor, user, table) query = 'REVOKE %s ON TABLE %s FROM %s' % ( - priv, quote_pg_identifier(table), quote_pg_identifier(user), ) + priv, pg_quote_identifier(table, 'table'), pg_quote_identifier(user, 'role') ) cursor.execute(query) curr_priv = get_table_privileges(cursor, user, table) return len(curr_priv) < len(prev_priv) @@ -324,21 +328,29 @@ def has_database_privilege(cursor, user, db, priv): return cursor.fetchone()[0] def grant_database_privilege(cursor, user, db, priv): + # Note: priv escaped by parse_privs prev_priv = get_database_privileges(cursor, user, db) if user == "PUBLIC": - query = 'GRANT %s ON DATABASE \"%s\" TO PUBLIC' % (priv, db) + query = 'GRANT %s ON DATABASE %s TO PUBLIC' % ( + priv, pg_quote_identifier(db, 'database')) else: - query = 'GRANT %s ON DATABASE \"%s\" TO \"%s\"' % (priv, db, user) + query = 'GRANT %s ON DATABASE %s TO %s' % ( + priv, pg_quote_identifier(db, 'database'), + pg_quote_identifier(user, 'role')) cursor.execute(query) curr_priv = get_database_privileges(cursor, user, db) return len(curr_priv) > len(prev_priv) def revoke_database_privilege(cursor, user, db, priv): + # Note: priv escaped by parse_privs prev_priv = get_database_privileges(cursor, user, db) if user == "PUBLIC": - query = 'REVOKE %s ON DATABASE \"%s\" FROM PUBLIC' % (priv, db) + query = 'REVOKE %s ON DATABASE %s FROM PUBLIC' % ( + priv, pg_quote_identifier(db, 'database')) else: - query = 'REVOKE %s ON DATABASE \"%s\" FROM \"%s\"' % (priv, db, user) + query = 'REVOKE %s ON DATABASE %s FROM %s' % ( + priv, pg_quote_identifier(db, 'database'), + pg_quote_identifier(user, 'role')) cursor.execute(query) curr_priv = get_database_privileges(cursor, user, db) return len(curr_priv) < len(prev_priv) @@ -387,11 +399,20 @@ def parse_role_attrs(role_attr_flags): Where: attributes := CREATEDB,CREATEROLE,NOSUPERUSER,... 
+ [ "[NO]SUPERUSER","[NO]CREATEROLE", "[NO]CREATEUSER", "[NO]CREATEDB", + "[NO]INHERIT", "[NO]LOGIN", "[NO]REPLICATION" ] + """ - if ',' not in role_attr_flags: - return role_attr_flags - flag_set = role_attr_flags.split(",") - o_flags = " ".join(flag_set) + if ',' in role_attr_flags: + flag_set = frozenset(r.upper() for r in role_attr_flags.split(",")) + elif role_attr_flags: + flag_set = frozenset((role_attr_flags.upper(),)) + else: + flag_set = frozenset() + if not flag_set.issubset(VALID_FLAGS): + raise InvalidFlagsError('Invalid role_attr_flags specified: %s' % + ' '.join(flag_set.difference(VALID_FLAGS))) + o_flags = ' '.join(flag_set) return o_flags def parse_privs(privs, db): @@ -417,12 +438,15 @@ def parse_privs(privs, db): if ':' not in token: type_ = 'database' name = db - priv_set = set(x.strip() for x in token.split(',')) + priv_set = frozenset(x.strip().upper() for x in token.split(',') if x.strip()) else: type_ = 'table' name, privileges = token.split(':', 1) - priv_set = set(x.strip() for x in privileges.split(',')) + priv_set = frozenset(x.strip().upper() for x in privileges.split(',') if x.strip()) + if not priv_set.issubset(VALID_PRIVS[type_]): + raise InvalidPrivsError('Invalid privs specified for %s: %s' % + (type_, ' '.join(priv_set.difference(VALID_PRIVS[type_])))) o_privs[type_][name] = priv_set return o_privs @@ -437,6 +461,7 @@ def main(): login_user=dict(default="postgres"), login_password=dict(default=""), login_host=dict(default=""), + login_unix_socket=dict(default=""), user=dict(required=True, aliases=['name']), password=dict(default=None), state=dict(default="present", choices=["absent", "present"]), @@ -460,7 +485,10 @@ def main(): module.fail_json(msg="privileges require a database to be specified") privs = parse_privs(module.params["priv"], db) port = module.params["port"] - role_attr_flags = parse_role_attrs(module.params["role_attr_flags"]) + try: + role_attr_flags = parse_role_attrs(module.params["role_attr_flags"]) + except InvalidFlagsError, e: + module.fail_json(msg=str(e)) if module.params["encrypted"]: encrypted = "ENCRYPTED" else: @@ -482,6 +510,12 @@ def main(): } kw = dict( (params_map[k], v) for (k, v) in module.params.iteritems() if k in params_map and v != "" ) + + # If a login_unix_socket is specified, incorporate it here. 
+ is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost" + if is_localhost and module.params["login_unix_socket"] != "": + kw["host"] = module.params["login_unix_socket"] + try: db_connection = psycopg2.connect(**kw) cursor = db_connection.cursor() @@ -494,18 +528,30 @@ def main(): if state == "present": if user_exists(cursor, user): - changed = user_alter(cursor, module, user, password, role_attr_flags, encrypted, expires) + try: + changed = user_alter(cursor, module, user, password, role_attr_flags, encrypted, expires) + except SQLParseError, e: + module.fail_json(msg=str(e)) else: - changed = user_add(cursor, user, password, role_attr_flags, encrypted, expires) - changed = grant_privileges(cursor, user, privs) or changed + try: + changed = user_add(cursor, user, password, role_attr_flags, encrypted, expires) + except SQLParseError, e: + module.fail_json(msg=str(e)) + try: + changed = grant_privileges(cursor, user, privs) or changed + except SQLParseError, e: + module.fail_json(msg=str(e)) else: if user_exists(cursor, user): if module.check_mode: changed = True kw['user_removed'] = True else: - changed = revoke_privileges(cursor, user, privs) - user_removed = user_delete(cursor, user) + try: + changed = revoke_privileges(cursor, user, privs) + user_removed = user_delete(cursor, user) + except SQLParseError, e: + module.fail_json(msg=str(e)) changed = changed or user_removed if fail_on_user and not user_removed: msg = "unable to remove user" @@ -523,4 +569,5 @@ def main(): # import module snippets from ansible.module_utils.basic import * +from ansible.module_utils.database import * main() diff --git a/files/assemble.py b/files/assemble.py index a16431b9f52..a66c82f432a 100644 --- a/files/assemble.py +++ b/files/assemble.py @@ -153,8 +153,9 @@ def main(): ) changed = False - pathmd5 = None - destmd5 = None + path_md5 = None # Deprecated + path_hash = None + dest_hash = None src = os.path.expanduser(module.params['src']) dest = os.path.expanduser(module.params['dest']) backup = module.params['backup'] @@ -175,23 +176,29 @@ def main(): module.fail_json(msg="Invalid Regexp (%s) in \"%s\"" % (e, regexp)) path = assemble_from_fragments(src, delimiter, compiled_regexp) - pathmd5 = module.md5(path) + path_hash = module.sha1(path) if os.path.exists(dest): - destmd5 = module.md5(dest) + dest_hash = module.sha1(dest) - if pathmd5 != destmd5: - if backup and destmd5 is not None: + if path_hash != dest_hash: + if backup and dest_hash is not None: module.backup_local(dest) shutil.copy(path, dest) changed = True + # Backwards compat. This won't return data if FIPS mode is active + try: + pathmd5 = module.md5(path) + except ValueError: + pathmd5 = None + os.remove(path) file_args = module.load_file_common_arguments(module.params) changed = module.set_fs_attributes_if_different(file_args, changed) # Mission complete - module.exit_json(src=src, dest=dest, md5sum=pathmd5, changed=changed, msg="OK") + module.exit_json(src=src, dest=dest, md5sum=pathmd5, checksum=path_hash, changed=changed, msg="OK") # import module snippets from ansible.module_utils.basic import * diff --git a/files/copy.py b/files/copy.py index eff46dae982..c5aaa01b5b3 100644 --- a/files/copy.py +++ b/files/copy.py @@ -27,7 +27,7 @@ module: copy version_added: "historical" short_description: Copies files to remote locations. description: - - The M(copy) module copies a file on the local box to remote locations. + - The M(copy) module copies a file on the local box to remote locations. 
Use the M(fetch) module to copy files from remote locations to the local box. options: src: description: @@ -167,8 +167,13 @@ def main(): if not os.access(src, os.R_OK): module.fail_json(msg="Source %s not readable" % (src)) - md5sum_src = module.md5(src) - md5sum_dest = None + checksum_src = module.sha1(src) + checksum_dest = None + # Backwards compat only. This will be None in FIPS mode + try: + md5sum_src = module.md5(src) + except ValueError: + md5sum_src = None changed = False @@ -176,7 +181,7 @@ def main(): if original_basename and dest.endswith("/"): dest = os.path.join(dest, original_basename) dirname = os.path.dirname(dest) - if not os.path.exists(dirname): + if not os.path.exists(dirname) and '/' in dirname: (pre_existing_dir, new_directory_list) = split_pre_existing_dir(dirname) os.makedirs(dirname) directory_args = module.load_file_common_arguments(module.params) @@ -198,7 +203,7 @@ def main(): basename = original_basename dest = os.path.join(dest, basename) if os.access(dest, os.R_OK): - md5sum_dest = module.md5(dest) + checksum_dest = module.sha1(dest) else: if not os.path.exists(os.path.dirname(dest)): try: @@ -215,7 +220,7 @@ def main(): module.fail_json(msg="Destination %s not writable" % (os.path.dirname(dest))) backup_file = None - if md5sum_src != md5sum_dest or os.path.islink(dest): + if checksum_src != checksum_dest or os.path.islink(dest): try: if backup: if os.path.exists(dest): @@ -238,7 +243,7 @@ def main(): changed = False res_args = dict( - dest = dest, src = src, md5sum = md5sum_src, changed = changed + dest = dest, src = src, md5sum = md5sum_src, checksum = checksum_src, changed = changed ) if backup_file: res_args['backup_file'] = backup_file diff --git a/files/fetch.py b/files/fetch.py index 5b47d87a856..fd631e6ebe6 100644 --- a/files/fetch.py +++ b/files/fetch.py @@ -34,13 +34,14 @@ options: required: false choices: [ "yes", "no" ] default: "no" - validate_md5: + validate_checksum: version_added: "1.4" description: - - Verify that the source and destination md5sums match after the files are fetched. + - Verify that the source and destination checksums match after the files are fetched. required: false choices: [ "yes", "no" ] default: "yes" + aliases: [ "validate_md5" ] flat: version_added: "1.2" description: diff --git a/files/file.py b/files/file.py index ff9feb41ee3..35bb52ab1e3 100644 --- a/files/file.py +++ b/files/file.py @@ -103,6 +103,23 @@ EXAMPLES = ''' ''' + +def get_state(path): + ''' Find out current state ''' + + if os.path.lexists(path): + if os.path.islink(path): + return 'link' + elif os.path.isdir(path): + return 'directory' + elif os.stat(path).st_nlink > 1: + return 'hard' + else: + # could be many other things, but defaulting to file + return 'file' + + return 'absent' + def main(): module = AnsibleModule( @@ -143,18 +160,7 @@ def main(): pass module.exit_json(path=path, changed=False, appears_binary=appears_binary) - # Find out current state - prev_state = 'absent' - if os.path.lexists(path): - if os.path.islink(path): - prev_state = 'link' - elif os.path.isdir(path): - prev_state = 'directory' - elif os.stat(path).st_nlink > 1: - prev_state = 'hard' - else: - # could be many other things, but defaulting to file - prev_state = 'file' + prev_state = get_state(path) # state should default to file, but since that creates many conflicts, # default to 'current' when it exists. 
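The checksum changes across assemble, copy and fetch in this stretch of the diff all follow one recipe: sha1 becomes the checksum actually compared, while md5 is computed only for backwards compatibility and wrapped in try/except ValueError, because a FIPS-140 enabled Python refuses to construct MD5 objects. Assuming module.md5 behaves like a straightforward hashlib file digest, the pattern is roughly:

    import hashlib

    def sketch_fips_safe_md5(path):
        try:
            digest = hashlib.md5()   # raises ValueError under FIPS mode
        except ValueError:
            return None              # callers then report md5sum=None
        f = open(path, 'rb')
        try:
            chunk = f.read(65536)
            while chunk:
                digest.update(chunk)
                chunk = f.read(65536)
        finally:
            f.close()
        return digest.hexdigest()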
@@ -168,22 +174,24 @@ def main(): # or copy module, even if this module never uses it, it is needed to key off some things if src is not None: src = os.path.expanduser(src) - - # original_basename is used by other modules that depend on file. - if os.path.isdir(path) and state not in ["link", "absent"]: - if params['original_basename']: - basename = params['original_basename'] - else: - basename = os.path.basename(src) - params['path'] = path = os.path.join(path, basename) else: if state in ['link','hard']: - if follow: + if follow and state == 'link': # use the current target of the link as the source src = os.readlink(path) else: module.fail_json(msg='src and dest are required for creating links') + # original_basename is used by other modules that depend on file. + if os.path.isdir(path) and state not in ["link", "absent"]: + basename = None + if params['original_basename']: + basename = params['original_basename'] + elif src is not None: + basename = os.path.basename(src) + if basename: + params['path'] = path = os.path.join(path, basename) + # make sure the target path is a directory when we're doing a recursive operation recurse = params['recurse'] if recurse and state != 'directory': @@ -210,7 +218,15 @@ def main(): module.exit_json(path=path, changed=False) elif state == 'file': + if state != prev_state: + if follow and prev_state == 'link': + # follow symlink and operate on original + path = os.readlink(path) + prev_state = get_state(path) + file_args['path'] = path + + if prev_state not in ['file','hard']: # file is not absent and any other state is a conflict module.fail_json(path=path, msg='file (%s) is %s, cannot continue' % (path, prev_state)) @@ -218,6 +234,11 @@ def main(): module.exit_json(path=path, changed=changed) elif state == 'directory': + + if follow and prev_state == 'link': + path = os.readlink(path) + prev_state = get_state(path) + if prev_state == 'absent': if module.check_mode: module.exit_json(changed=True) @@ -238,6 +259,10 @@ def main(): tmp_file_args['path']=curpath changed = module.set_fs_attributes_if_different(tmp_file_args, changed) + # We already know prev_state is not 'absent', therefore it exists in some form. + elif prev_state != 'directory': + module.fail_json(path=path, msg='%s already exists as a %s' % (path, prev_state)) + changed = module.set_fs_attributes_if_different(file_args, changed) if recurse: @@ -330,13 +355,13 @@ def main(): open(path, 'w').close() except OSError, e: module.fail_json(path=path, msg='Error, could not touch target: %s' % str(e)) - elif prev_state in ['file', 'directory']: + elif prev_state in ['file', 'directory', 'hard']: try: os.utime(path, None) except OSError, e: module.fail_json(path=path, msg='Error while touching existing target: %s' % str(e)) else: - module.fail_json(msg='Cannot touch other than files and directories') + module.fail_json(msg='Cannot touch other than files, directories, and hardlinks (%s is %s)' % (path, prev_state)) try: module.set_fs_attributes_if_different(file_args, True) except SystemExit, e: diff --git a/files/ini_file.py b/files/ini_file.py index 83a980f5ba8..756f2732a84 100644 --- a/files/ini_file.py +++ b/files/ini_file.py @@ -23,6 +23,7 @@ DOCUMENTATION = ''' --- module: ini_file short_description: Tweak settings in INI files +extends_documentation_fragment: files description: - Manage (add, remove, change) individual settings in an INI-style file without having to manage the file as a whole with, say, M(template) or M(assemble). 
Adds missing diff --git a/files/lineinfile.py b/files/lineinfile.py index 12f8dc89a7d..b9fc628e10c 100644 --- a/files/lineinfile.py +++ b/files/lineinfile.py @@ -28,12 +28,15 @@ DOCUMENTATION = """ --- module: lineinfile author: Daniel Hokka Zakrisson, Ahti Kitsik +extends_documentation_fragment: files short_description: Ensure a particular line is in a file, or replace an existing line using a back-referenced regular expression. description: - This module will search a file for a line, and ensure that it is present or absent. - - This is primarily useful when you want to change a single line in a - file only. For other cases, see the M(copy) or M(template) modules. + - This is primarily useful when you want to change a single line in + a file only. See the M(replace) module if you want to change + multiple, similar lines; for other cases, see the M(copy) or + M(template) modules. version_added: "0.7" options: dest: @@ -127,7 +130,7 @@ options: """ EXAMPLES = r""" -- lineinfile: dest=/etc/selinux/config regexp=^SELINUX= line=SELINUX=disabled +- lineinfile: dest=/etc/selinux/config regexp=^SELINUX= line=SELINUX=enforcing - lineinfile: dest=/etc/sudoers state=absent regexp="^%wheel" @@ -145,7 +148,7 @@ EXAMPLES = r""" - lineinfile: dest=/opt/jboss-as/bin/standalone.conf regexp='^(.*)Xms(\d+)m(.*)$' line='\1Xms${xms}m\3' backrefs=yes -# Validate a the sudoers file before saving +# Validate the sudoers file before saving - lineinfile: dest=/etc/sudoers state=present regexp='^%ADMIN ALL\=' line='%ADMIN ALL=(ALL) NOPASSWD:ALL' validate='visudo -cf %s' """ @@ -189,7 +192,7 @@ def present(module, dest, regexp, line, insertafter, insertbefore, create, if not create: module.fail_json(rc=257, msg='Destination %s does not exist !' % dest) destpath = os.path.dirname(dest) - if not os.path.exists(destpath): + if not os.path.exists(destpath) and not module.check_mode: os.makedirs(destpath) lines = [] else: @@ -279,6 +282,9 @@ def present(module, dest, regexp, line, insertafter, insertbefore, create, backupdest = module.backup_local(dest) write_changes(module, lines, dest) + if module.check_mode and not os.path.exists(dest): + module.exit_json(changed=changed, msg=msg, backup=backupdest) + msg, changed = check_file_attrs(module, changed, msg) module.exit_json(changed=changed, msg=msg, backup=backupdest) diff --git a/files/replace.py b/files/replace.py index 57b522dd773..b7b75a9604d 100644 --- a/files/replace.py +++ b/files/replace.py @@ -26,6 +26,7 @@ DOCUMENTATION = """ --- module: replace author: Evan Kaufman +extends_documentation_fragment: files short_description: Replace all instances of a particular string in a file using a back-referenced regular expression. description: diff --git a/files/stat.py b/files/stat.py index 8c717a395c4..484da2136d9 100644 --- a/files/stat.py +++ b/files/stat.py @@ -36,10 +36,17 @@ options: aliases: [] get_md5: description: - - Whether to return the md5 sum of the file + - Whether to return the md5 sum of the file. Will return None if we're unable to use md5 (Common for FIPS-140 compliant systems) required: false default: yes aliases: [] + get_checksum: + description: + - Whether to return a checksum of the file (currently sha1) + required: false + default: yes + aliases: [] + version_added: "1.8" author: Bruce Pennypacker ''' @@ -51,12 +58,12 @@ EXAMPLES = ''' - fail: msg="Whoops! file ownership has changed" when: st.stat.pw_name != 'root' -# Determine if a path exists and is a directory. Note we need to test +# Determine if a path exists and is a directory. 
Note that we need to test # both that p.stat.isdir actually exists, and also that it's set to true. - stat: path=/path/to/something register: p - debug: msg="Path exists and is a directory" - when: p.stat.isdir is defined and p.stat.isdir == true + when: p.stat.isdir is defined and p.stat.isdir # Don't do md5 checksum - stat: path=/path/to/myhugefile get_md5=no @@ -66,13 +73,15 @@ import os import sys from stat import * import pwd +import grp def main(): module = AnsibleModule( argument_spec = dict( path = dict(required=True), follow = dict(default='no', type='bool'), - get_md5 = dict(default='yes', type='bool') + get_md5 = dict(default='yes', type='bool'), + get_checksum = dict(default='yes', type='bool') ), supports_check_mode = True ) @@ -81,6 +90,7 @@ def main(): path = os.path.expanduser(path) follow = module.params.get('follow') get_md5 = module.params.get('get_md5') + get_checksum = module.params.get('get_checksum') try: if follow: @@ -99,6 +109,7 @@ def main(): # back to ansible d = { 'exists' : True, + 'path' : path, 'mode' : "%04o" % S_IMODE(mode), 'isdir' : S_ISDIR(mode), 'ischr' : S_ISCHR(mode), @@ -133,13 +144,23 @@ def main(): d['lnk_source'] = os.path.realpath(path) if S_ISREG(mode) and get_md5 and os.access(path,os.R_OK): - d['md5'] = module.md5(path) + # Will fail on FIPS-140 compliant systems + try: + d['md5'] = module.md5(path) + except ValueError: + d['md5'] = None + + if S_ISREG(mode) and get_checksum and os.access(path,os.R_OK): + d['checksum'] = module.sha1(path) try: pw = pwd.getpwuid(st.st_uid) d['pw_name'] = pw.pw_name + + grp_info = grp.getgrgid(pw.pw_gid) + d['gr_name'] = grp_info.gr_name except: pass diff --git a/files/synchronize.py b/files/synchronize.py index 842dd863849..a2138b3410d 100644 --- a/files/synchronize.py +++ b/files/synchronize.py @@ -39,7 +39,7 @@ options: version_added: "1.5" mode: description: - - Specify the direction of the synchroniztion. In push mode the localhost or delegate is the source; In pull mode the remote host in context is the source. + - Specify the direction of the synchronization. In push mode the localhost or delegate is the source; In pull mode the remote host in context is the source. required: false choices: [ 'push', 'pull' ] default: 'push' @@ -145,15 +145,16 @@ options: required: false version_added: "1.6" notes: + - rsync must be installed on both the local and remote machine. - Inspect the verbose output to validate the destination user/host/path are what was expected. - The remote user for the dest path will always be the remote_user, not - the sudo_user. + the sudo_user. - Expect that dest=~/x will be ~/x even if using sudo. - To exclude files and directories from being synchronized, you may add C(.rsync-filter) files to the source directory. - - + + author: Timothy Appnel ''' @@ -180,7 +181,9 @@ local_action: synchronize src=some/relative/path dest=/some/absolute/path pull mode synchronize: mode=pull src=some/relative/path dest=/some/absolute/path -# Synchronization of src on delegate host to dest on the current inventory host +# Synchronization of src on delegate host to dest on the current inventory host. +# If delegate_to is set to the current inventory host, this can be used to synchronize +# two directories on that host.
synchronize: > src=some/relative/path dest=/some/absolute/path delegate_to: delegate.host diff --git a/files/unarchive.py b/files/unarchive.py index 657e464937b..fc2db0e6907 100644 --- a/files/unarchive.py +++ b/files/unarchive.py @@ -24,6 +24,7 @@ DOCUMENTATION = ''' module: unarchive version_added: 1.4 short_description: Copies an archive to a remote location and unpack it +extends_documentation_fragment: files description: - The M(unarchive) module copies an archive file from the local machine to a remote and unpacks it. options: @@ -75,18 +76,35 @@ EXAMPLES = ''' ''' import os +from zipfile import ZipFile +class UnarchiveError(Exception): + pass # class to handle .zip files -class ZipFile(object): - +class ZipArchive(object): + def __init__(self, src, dest, module): self.src = src self.dest = dest self.module = module self.cmd_path = self.module.get_bin_path('unzip') + self._files_in_archive = [] + + @property + def files_in_archive(self, force_refresh=False): + if self._files_in_archive and not force_refresh: + return self._files_in_archive + + archive = ZipFile(self.src) + try: + self._files_in_archive = archive.namelist() + except: + raise UnarchiveError('Unable to list files in the archive') + + return self._files_in_archive - def is_unarchived(self): + def is_unarchived(self, mode, owner, group): return dict(unarchived=False) def unarchive(self): @@ -105,19 +123,57 @@ class ZipFile(object): # class to handle gzipped tar files -class TgzFile(object): - +class TgzArchive(object): + def __init__(self, src, dest, module): self.src = src self.dest = dest self.module = module self.cmd_path = self.module.get_bin_path('tar') self.zipflag = 'z' + self._files_in_archive = [] + + @property + def files_in_archive(self, force_refresh=False): + if self._files_in_archive and not force_refresh: + return self._files_in_archive - def is_unarchived(self): - cmd = '%s -v -C "%s" --diff -%sf "%s"' % (self.cmd_path, self.dest, self.zipflag, self.src) + cmd = '%s -t%sf "%s"' % (self.cmd_path, self.zipflag, self.src) + rc, out, err = self.module.run_command(cmd) + if rc != 0: + raise UnarchiveError('Unable to list files in the archive') + + for filename in out.splitlines(): + if filename: + self._files_in_archive.append(filename) + return self._files_in_archive + + def is_unarchived(self, mode, owner, group): + cmd = '%s -C "%s" --diff -%sf "%s"' % (self.cmd_path, self.dest, self.zipflag, self.src) rc, out, err = self.module.run_command(cmd) unarchived = (rc == 0) + if not unarchived: + # Check whether the differences are in something that we're + # setting anyway + + # What will be set + to_be_set = set() + for perm in (('Mode', mode), ('Gid', group), ('Uid', owner)): + if perm[1] is not None: + to_be_set.add(perm[0]) + + # What is different + changes = set() + difference_re = re.compile(r': (.*) differs$') + for line in out.splitlines(): + match = difference_re.search(line) + if not match: + # Unknown tar output. 
Assume we have changes + return dict(unarchived=unarchived, rc=rc, out=out, err=err, cmd=cmd) + changes.add(match.groups()[0]) + + if changes and changes.issubset(to_be_set): + unarchived = True return dict(unarchived=unarchived, rc=rc, out=out, err=err, cmd=cmd) def unarchive(self): @@ -128,47 +184,41 @@ class TgzFile(object): def can_handle_archive(self): if not self.cmd_path: return False - cmd = '%s -t%sf "%s"' % (self.cmd_path, self.zipflag, self.src) - rc, out, err = self.module.run_command(cmd) - if rc == 0: - if len(out.splitlines(True)) > 0: + + try: + if self.files_in_archive: return True + except UnarchiveError: + pass + # Errors and no files in archive assume that we weren't able to + # properly unarchive it return False # class to handle tar files that aren't compressed -class TarFile(TgzFile): +class TarArchive(TgzArchive): def __init__(self, src, dest, module): - self.src = src - self.dest = dest - self.module = module - self.cmd_path = self.module.get_bin_path('tar') + super(TarArchive, self).__init__(src, dest, module) self.zipflag = '' # class to handle bzip2 compressed tar files -class TarBzip(TgzFile): +class TarBzipArchive(TgzArchive): def __init__(self, src, dest, module): - self.src = src - self.dest = dest - self.module = module - self.cmd_path = self.module.get_bin_path('tar') + super(TarBzipArchive, self).__init__(src, dest, module) self.zipflag = 'j' # class to handle xz compressed tar files -class TarXz(TgzFile): +class TarXzArchive(TgzArchive): def __init__(self, src, dest, module): - self.src = src - self.dest = dest - self.module = module - self.cmd_path = self.module.get_bin_path('tar') + super(TarXzArchive, self).__init__(src, dest, module) self.zipflag = 'J' # try handlers in order and return the one that works or bail if none work def pick_handler(src, dest, module): - handlers = [TgzFile, ZipFile, TarFile, TarBzip, TarXz] + handlers = [TgzArchive, ZipArchive, TarArchive, TarBzipArchive, TarXzArchive] for handler in handlers: obj = handler(src, dest, module) if obj.can_handle_archive(): @@ -192,7 +242,7 @@ def main(): src = os.path.expanduser(module.params['src']) dest = os.path.expanduser(module.params['dest']) copy = module.params['copy'] - creates = module.params['creates'] + file_args = module.load_file_common_arguments(module.params) # did tar file arrive? if not os.path.exists(src): @@ -203,20 +253,6 @@ def main(): if not os.access(src, os.R_OK): module.fail_json(msg="Source '%s' not readable" % src) - if creates: - # do not run the command if the line contains creates=filename - # and the filename already exists. This allows idempotence - # of command executions. - v = os.path.expanduser(creates) - if os.path.exists(v): - module.exit_json( - stdout="skipped, since %s exists" % v, - skipped=True, - changed=False, - stderr=False, - rc=0 - ) - # is dest OK to receive tar file? if not os.path.isdir(dest): module.fail_json(msg="Destination '%s' is not a directory" % dest) @@ -228,23 +264,29 @@ def main(): res_args = dict(handler=handler.__class__.__name__, dest=dest, src=src) # do we need to do unpack? 
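The new is_unarchived above stops treating every `tar --diff` mismatch as "must re-extract": lines matching ": <attr> differs" are collected, and if the changed attributes are all ones the task will set anyway (Mode/Uid/Gid), the archive counts as already unpacked. Pulled out into a standalone sketch (hypothetical function name):

    import re

    difference_re = re.compile(r': (.*) differs$')

    def sketch_only_expected_changes(tar_diff_out, to_be_set):
        changes = set()
        for line in tar_diff_out.splitlines():
            match = difference_re.search(line)
            if not match:
                return False        # unknown tar output: assume a real change
            changes.add(match.group(1))
        return bool(changes) and changes.issubset(to_be_set)

    sketch_only_expected_changes('./motd: Mode differs', set(['Mode', 'Uid', 'Gid']))  # True
    sketch_only_expected_changes('./motd: Size differs', set(['Mode', 'Uid', 'Gid']))  # False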
- res_args['check_results'] = handler.is_unarchived() + res_args['check_results'] = handler.is_unarchived(file_args['mode'], + file_args['owner'], file_args['group']) if res_args['check_results']['unarchived']: res_args['changed'] = False - module.exit_json(**res_args) - - # do the unpack - try: - res_args['extract_results'] = handler.unarchive() - if res_args['extract_results']['rc'] != 0: - module.fail_json(msg="failed to unpack %s to %s" % (src, dest), **res_args) - except IOError: - module.fail_json(msg="failed to unpack %s to %s" % (src, dest)) + else: + # do the unpack + try: + res_args['extract_results'] = handler.unarchive() + if res_args['extract_results']['rc'] != 0: + module.fail_json(msg="failed to unpack %s to %s" % (src, dest), **res_args) + except IOError: + module.fail_json(msg="failed to unpack %s to %s" % (src, dest)) + else: + res_args['changed'] = True - res_args['changed'] = True + # do we need to change perms? + for filename in handler.files_in_archive: + file_args['path'] = os.path.join(dest, filename) + res_args['changed'] = module.set_fs_attributes_if_different(file_args, res_args['changed']) module.exit_json(**res_args) # import module snippets from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/network/basics/__init__.py b/network/basics/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/network/get_url.py b/network/basics/get_url.py similarity index 96% rename from network/get_url.py rename to network/basics/get_url.py index c3b81129a27..b0d27859420 100644 --- a/network/get_url.py +++ b/network/basics/get_url.py @@ -154,7 +154,7 @@ def url_get(module, url, dest, use_proxy, last_mod_time, force, timeout=10): if info['status'] == 304: module.exit_json(url=url, dest=dest, changed=False, msg=info.get('msg', '')) - # create a temporary file and copy content to do md5-based replacement + # create a temporary file and copy content to do checksum-based replacement if info['status'] != 200: module.fail_json(msg="Request failed", status_code=info['status'], response=info['msg'], url=url, dest=dest) @@ -241,8 +241,8 @@ def main(): filename = url_filename(info['url']) dest = os.path.join(dest, filename) - md5sum_src = None - md5sum_dest = None + checksum_src = None + checksum_dest = None # raise an error if there is no tmpsrc file if not os.path.exists(tmpsrc): @@ -251,7 +251,7 @@ def main(): if not os.access(tmpsrc, os.R_OK): os.remove(tmpsrc) module.fail_json( msg="Source %s not readable" % (tmpsrc)) - md5sum_src = module.md5(tmpsrc) + checksum_src = module.sha1(tmpsrc) # check if there is no dest file if os.path.exists(dest): @@ -262,13 +262,13 @@ def main(): if not os.access(dest, os.R_OK): os.remove(tmpsrc) module.fail_json( msg="Destination %s not readable" % (dest)) - md5sum_dest = module.md5(dest) + checksum_dest = module.sha1(dest) else: if not os.access(os.path.dirname(dest), os.W_OK): os.remove(tmpsrc) module.fail_json( msg="Destination %s not writable" % (os.path.dirname(dest))) - if md5sum_src != md5sum_dest: + if checksum_src != checksum_dest: try: shutil.copyfile(tmpsrc, dest) except Exception, err: @@ -303,8 +303,15 @@ def main(): file_args['path'] = dest changed = module.set_fs_attributes_if_different(file_args, changed) + # Backwards compat only. 
We'll return None on FIPS enabled systems + try: + md5sum = module.md5(dest) + except ValueError: + md5sum = None + # Mission complete - module.exit_json(url=url, dest=dest, src=tmpsrc, md5sum=md5sum_src, + + module.exit_json(url=url, dest=dest, src=tmpsrc, md5sum=md5sum, checksum=checksum_src, sha256sum=sha256sum, changed=changed, msg=info.get('msg', '')) # import module snippets diff --git a/network/slurp.py b/network/basics/slurp.py similarity index 100% rename from network/slurp.py rename to network/basics/slurp.py diff --git a/network/uri.py b/network/basics/uri.py similarity index 98% rename from network/uri.py rename to network/basics/uri.py index 8d62463df72..aac724a8f13 100644 --- a/network/uri.py +++ b/network/basics/uri.py @@ -194,8 +194,8 @@ def write_file(module, url, dest, content): module.fail_json(msg="failed to create temporary content file: %s" % str(err)) f.close() - md5sum_src = None - md5sum_dest = None + checksum_src = None + checksum_dest = None # raise an error if there is no tmpsrc file if not os.path.exists(tmpsrc): @@ -204,7 +204,7 @@ def write_file(module, url, dest, content): if not os.access(tmpsrc, os.R_OK): os.remove(tmpsrc) module.fail_json( msg="Source %s not readable" % (tmpsrc)) - md5sum_src = module.md5(tmpsrc) + checksum_src = module.sha1(tmpsrc) # check if there is no dest file if os.path.exists(dest): @@ -215,19 +215,19 @@ def write_file(module, url, dest, content): if not os.access(dest, os.R_OK): os.remove(tmpsrc) module.fail_json( msg="Destination %s not readable" % (dest)) - md5sum_dest = module.md5(dest) + checksum_dest = module.sha1(dest) else: if not os.access(os.path.dirname(dest), os.W_OK): os.remove(tmpsrc) module.fail_json( msg="Destination dir %s not writable" % (os.path.dirname(dest))) - - if md5sum_src != md5sum_dest: + + if checksum_src != checksum_dest: try: shutil.copyfile(tmpsrc, dest) except Exception, err: os.remove(tmpsrc) module.fail_json(msg="failed to copy %s to %s: %s" % (tmpsrc, dest, str(err))) - + os.remove(tmpsrc) @@ -426,7 +426,8 @@ def main(): uresp[ukey] = value if 'content_type' in uresp: - if uresp['content_type'].startswith('application/json'): + if uresp['content_type'].startswith('application/json') or \ + uresp['content_type'].startswith('text/json'): try: js = json.loads(content) uresp['json'] = js diff --git a/packaging/language/__init__.py b/packaging/language/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/packaging/easy_install.py b/packaging/language/easy_install.py similarity index 100% rename from packaging/easy_install.py rename to packaging/language/easy_install.py diff --git a/packaging/gem.py b/packaging/language/gem.py similarity index 100% rename from packaging/gem.py rename to packaging/language/gem.py diff --git a/packaging/pip.py b/packaging/language/pip.py similarity index 100% rename from packaging/pip.py rename to packaging/language/pip.py diff --git a/packaging/os/__init__.py b/packaging/os/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/packaging/apt.py b/packaging/os/apt.py old mode 100755 new mode 100644 similarity index 80% rename from packaging/apt.py rename to packaging/os/apt.py index e5a38e538d1..9f5b8fd4cda --- a/packaging/apt.py +++ b/packaging/os/apt.py @@ -29,7 +29,7 @@ version_added: "0.0.2" options: name: description: - - A package name, like C(foo), or package specifier with version, like C(foo=1.0). Wildcards (fnmatch) like apt* are also supported. 
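The rewritten description that follows documents version wildcards; under the hood (visible further down in package_split/package_versions/package_status) the spec is split once on '=' and the version half is matched with fnmatch against every version the apt cache knows. The matching step itself is just:

    import fnmatch

    available = ['0.9.7', '1.0.1', '1.0.5', '1.1.0']   # versions from the apt cache
    fnmatch.filter(available, '1.0*')                  # -> ['1.0.1', '1.0.5']
    fnmatch.fnmatch('1.0.1', '1.0*')                   # -> True (installed matches)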
+ - A package name, like C(foo), or package specifier with version, like C(foo=1.0). Name wildcards (fnmatch) like C(apt*) and version wildcards like C(foo=1.0*) are also supported. required: false default: null state: @@ -144,6 +144,7 @@ warnings.filterwarnings('ignore', "apt API not stable yet", FutureWarning) import os import datetime import fnmatch +import itertools # APT related constants APT_ENV_VARS = dict( @@ -167,12 +168,30 @@ except ImportError: HAS_PYTHON_APT = False def package_split(pkgspec): - parts = pkgspec.split('=') + parts = pkgspec.split('=', 1) if len(parts) > 1: return parts[0], parts[1] else: return parts[0], None +def package_versions(pkgname, pkg, pkg_cache): + try: + versions = set(p.version for p in pkg.versions) + except AttributeError: + # assume older version of python-apt is installed + # apt.package.Package#versions require python-apt >= 0.7.9. + pkg_cache_list = (p for p in pkg_cache.Packages if p.Name == pkgname) + pkg_versions = (p.VersionList for p in pkg_cache_list) + versions = set(p.VerStr for p in itertools.chain(*pkg_versions)) + + return versions + +def package_version_compare(version, other_version): + try: + return apt_pkg.version_compare(version, other_version) + except AttributeError: + return apt_pkg.VersionCompare(version, other_version) + def package_status(m, pkgname, version, cache, state): try: # get the package from the cache, as well as the @@ -183,9 +202,14 @@ def package_status(m, pkgname, version, cache, state): ll_pkg = cache._cache[pkgname] # the low-level package object except KeyError: if state == 'install': - if cache.get_providing_packages(pkgname): + try: + if cache.get_providing_packages(pkgname): + return False, True, False + m.fail_json(msg="No package matching '%s' is available" % pkgname) + except AttributeError: + # python-apt version too old to detect virtual packages + # mark as upgradable and let apt-get install deal with it return False, True, False - m.fail_json(msg="No package matching '%s' is available" % pkgname) else: return False, False, False try: @@ -205,19 +229,35 @@ def package_status(m, pkgname, version, cache, state): # assume older version of python-apt is installed package_is_installed = pkg.isInstalled - if version and package_is_installed: - try: - installed_version = pkg.installed.version - except AttributeError: - installed_version = pkg.installedVersion - return package_is_installed and fnmatch.fnmatch(installed_version, version), False, has_files + if version: + versions = package_versions(pkgname, pkg, cache._cache) + avail_upgrades = fnmatch.filter(versions, version) + + if package_is_installed: + try: + installed_version = pkg.installed.version + except AttributeError: + installed_version = pkg.installedVersion + + # Only claim the package is installed if the version is matched as well + package_is_installed = fnmatch.fnmatch(installed_version, version) + + # Only claim the package is upgradable if a candidate matches the version + package_is_upgradable = False + for candidate in avail_upgrades: + if package_version_compare(candidate, installed_version) > 0: + package_is_upgradable = True + break + else: + package_is_upgradable = bool(avail_upgrades) else: try: package_is_upgradable = pkg.is_upgradable except AttributeError: # assume older version of python-apt is installed package_is_upgradable = pkg.isUpgradable - return package_is_installed, package_is_upgradable, has_files + + return package_is_installed, package_is_upgradable, has_files def expand_dpkg_options(dpkg_options_compressed): 
options_list = dpkg_options_compressed.split(',') @@ -229,39 +269,54 @@ def expand_dpkg_options(dpkg_options_compressed): def expand_pkgspec_from_fnmatches(m, pkgspec, cache): new_pkgspec = [] - for pkgname_or_fnmatch_pattern in pkgspec: - # note that any of these chars is not allowed in a (debian) pkgname - if [c for c in pkgname_or_fnmatch_pattern if c in "*?[]!"]: - if "=" in pkgname_or_fnmatch_pattern: - m.fail_json(msg="pkgname wildcard and version can not be mixed") + for pkgspec_pattern in pkgspec: + pkgname_pattern, version = package_split(pkgspec_pattern) + + # note that none of these chars is allowed in a (debian) pkgname + if frozenset('*?[]!').intersection(pkgname_pattern): # handle multiarch pkgnames, the idea is that "apt*" should # only select native packages. But "apt*:i386" should still work - if not ":" in pkgname_or_fnmatch_pattern: - matches = fnmatch.filter( - [pkg.name for pkg in cache - if not ":" in pkg.name], pkgname_or_fnmatch_pattern) + if not ":" in pkgname_pattern: + try: + pkg_name_cache = _non_multiarch + except NameError: + pkg_name_cache = _non_multiarch = [pkg.name for pkg in cache if not ':' in pkg.name] else: - matches = fnmatch.filter( - [pkg.name for pkg in cache], pkgname_or_fnmatch_pattern) + try: + pkg_name_cache = _all_pkg_names + except NameError: + pkg_name_cache = _all_pkg_names = [pkg.name for pkg in cache] + matches = fnmatch.filter(pkg_name_cache, pkgname_pattern) if len(matches) == 0: - m.fail_json(msg="No package(s) matching '%s' available" % str(pkgname_or_fnmatch_pattern)) + m.fail_json(msg="No package(s) matching '%s' available" % str(pkgname_pattern)) else: new_pkgspec.extend(matches) else: - new_pkgspec.append(pkgname_or_fnmatch_pattern) + # No wildcards in name + new_pkgspec.append(pkgspec_pattern) return new_pkgspec def install(m, pkgspec, cache, upgrade=False, default_release=None, install_recommends=True, force=False, dpkg_options=expand_dpkg_options(DPKG_OPTIONS)): + pkg_list = [] packages = "" pkgspec = expand_pkgspec_from_fnmatches(m, pkgspec, cache) for package in pkgspec: name, version = package_split(package) installed, upgradable, has_files = package_status(m, name, version, cache, state='install') if not installed or (upgrade and upgradable): - packages += "'%s' " % package + pkg_list.append("'%s'" % package) + if installed and upgradable and version: + # This happens when the package is installed, a newer version is + # available, and the version is a wildcard that matches both + # + # We do not apply the upgrade flag because we cannot specify both + # a version and state=latest. 
(This behaviour mirrors how apt + # treats a version with wildcard in the package) + pkg_list.append("'%s'" % package) + packages = ' '.join(pkg_list) if len(packages) != 0: if force: @@ -303,7 +358,7 @@ def install_deb(m, debs, cache, force, install_recommends, dpkg_options): if pkg.compare_to_version_in_cache() == pkg.VERSION_SAME: continue # Check if package is installable - if not pkg.check(): + if not pkg.check() and not force: m.fail_json(msg=pkg._failure_string) # add any missing deps to the list of deps we need @@ -350,13 +405,14 @@ def install_deb(m, debs, cache, force, install_recommends, dpkg_options): def remove(m, pkgspec, cache, purge=False, dpkg_options=expand_dpkg_options(DPKG_OPTIONS)): - packages = "" + pkg_list = [] pkgspec = expand_pkgspec_from_fnmatches(m, pkgspec, cache) for package in pkgspec: name, version = package_split(package) installed, upgradable, has_files = package_status(m, name, version, cache, state='remove') if installed or (has_files and purge): - packages += "'%s' " % package + pkg_list.append("'%s'" % package) + packages = ' '.join(pkg_list) if len(packages) == 0: m.exit_json(changed=False) @@ -387,6 +443,7 @@ def upgrade(m, mode="yes", force=False, default_release=None, check_arg = '' apt_cmd = None + prompt_regex = None if mode == "dist": # apt-get dist-upgrade apt_cmd = APT_GET_CMD @@ -399,12 +456,13 @@ def upgrade(m, mode="yes", force=False, default_release=None, # aptitude safe-upgrade # mode=yes # default apt_cmd = APTITUDE_CMD upgrade_command = "safe-upgrade" + prompt_regex = r"(^Do you want to ignore this warning and proceed anyway\?|^\*\*\*.*\[default=.*\])" if force: if apt_cmd == APT_GET_CMD: force_yes = '--force-yes' else: - force_yes = '' + force_yes = '--assume-yes --allow-untrusted' else: force_yes = '' @@ -419,7 +477,7 @@ def upgrade(m, mode="yes", force=False, default_release=None, if default_release: cmd += " -t '%s'" % (default_release,) - rc, out, err = m.run_command(cmd) + rc, out, err = m.run_command(cmd, prompt_regex=prompt_regex) if rc: m.fail_json(msg="'%s %s' failed: %s" % (apt_cmd, upgrade_command, err), stdout=out) if (apt_cmd == APT_GET_CMD and APT_GET_ZERO in out) or (apt_cmd == APTITUDE_CMD and APTITUDE_ZERO in out): @@ -429,7 +487,7 @@ def upgrade(m, mode="yes", force=False, default_release=None, def main(): module = AnsibleModule( argument_spec = dict( - state = dict(default='installed', choices=['installed', 'latest', 'removed', 'absent', 'present']), + state = dict(default='present', choices=['installed', 'latest', 'removed', 'absent', 'present']), update_cache = dict(default=False, aliases=['update-cache'], type='bool'), cache_valid_time = dict(type='int'), purge = dict(default=False, type='bool'), @@ -467,6 +525,12 @@ def main(): install_recommends = p['install_recommends'] dpkg_options = expand_dpkg_options(p['dpkg_options']) + # Deal with deprecated aliases + if p['state'] == 'installed': + p['state'] = 'present' + if p['state'] == 'removed': + p['state'] = 'absent' + try: cache = apt.Cache() if p['default_release']: @@ -517,8 +581,8 @@ def main(): p['default_release'], dpkg_options) if p['deb']: - if p['state'] != "installed": - module.fail_json(msg="deb only supports state=installed") + if p['state'] != 'present': + module.fail_json(msg="deb only supports state=present") install_deb(module, p['deb'], cache, install_recommends=install_recommends, force=force_yes, dpkg_options=p['dpkg_options']) @@ -541,7 +605,7 @@ def main(): module.exit_json(**retvals) else: module.fail_json(**retvals) - elif p['state'] in [ 
'installed', 'present' ]: + elif p['state'] == 'present': result = install(module, packages, cache, default_release=p['default_release'], install_recommends=install_recommends,force=force_yes, dpkg_options=dpkg_options) @@ -550,13 +614,16 @@ def main(): module.exit_json(**retvals) else: module.fail_json(**retvals) - elif p['state'] in [ 'removed', 'absent' ]: + elif p['state'] == 'absent': remove(module, packages, cache, p['purge'], dpkg_options) except apt.cache.LockFailedException: module.fail_json(msg="Failed to lock apt for exclusive operation") + except apt.cache.FetchFailedException: + module.fail_json(msg="Could not fetch updated apt files") # import module snippets from ansible.module_utils.basic import * -main() +if __name__ == "__main__": + main() diff --git a/packaging/apt_key.py b/packaging/os/apt_key.py similarity index 98% rename from packaging/apt_key.py rename to packaging/os/apt_key.py index 0a483a97bbc..51901e76e6b 100644 --- a/packaging/apt_key.py +++ b/packaging/os/apt_key.py @@ -81,6 +81,9 @@ options: ''' EXAMPLES = ''' +# Add an apt key by id from a keyserver +- apt_key: keyserver=keyserver.ubuntu.com id=36A1D7869245C8950F966E92D8576A8BA88D21E9 + # Add an Apt signing key, uses whichever key is at the URL - apt_key: url=https://ftp-master.debian.org/keys/archive-key-6.0.asc state=present diff --git a/packaging/apt_repository.py b/packaging/os/apt_repository.py similarity index 100% rename from packaging/apt_repository.py rename to packaging/os/apt_repository.py diff --git a/packaging/apt_rpm.py b/packaging/os/apt_rpm.py old mode 100755 new mode 100644 similarity index 100% rename from packaging/apt_rpm.py rename to packaging/os/apt_rpm.py diff --git a/packaging/redhat_subscription.py b/packaging/os/redhat_subscription.py similarity index 99% rename from packaging/redhat_subscription.py rename to packaging/os/redhat_subscription.py index df1c043b89c..b5205edc8b5 100644 --- a/packaging/redhat_subscription.py +++ b/packaging/os/redhat_subscription.py @@ -63,11 +63,11 @@ options: EXAMPLES = ''' # Register as user (joe_user) with password (somepass) and auto-subscribe to available content. -- redhat_subscription: action=register username=joe_user password=somepass autosubscribe=true +- redhat_subscription: state=present username=joe_user password=somepass autosubscribe=true # Register with activationkey (1-222333444) and consume subscriptions matching # the names (Red hat Enterprise Server) and (Red Hat Virtualization) -- redhat_subscription: action=register +- redhat_subscription: state=present activationkey=1-222333444 pool='^(Red Hat Enterprise Server|Red Hat Virtualization)$' ''' diff --git a/packaging/rhn_channel.py b/packaging/os/rhn_channel.py similarity index 100% rename from packaging/rhn_channel.py rename to packaging/os/rhn_channel.py diff --git a/packaging/rhn_register.py b/packaging/os/rhn_register.py similarity index 100% rename from packaging/rhn_register.py rename to packaging/os/rhn_register.py diff --git a/packaging/rpm_key.py b/packaging/os/rpm_key.py similarity index 100% rename from packaging/rpm_key.py rename to packaging/os/rpm_key.py diff --git a/packaging/yum.py b/packaging/os/yum.py similarity index 98% rename from packaging/yum.py rename to packaging/os/yum.py index c3158077d18..65d5b43b07c 100644 --- a/packaging/yum.py +++ b/packaging/os/yum.py @@ -96,6 +96,16 @@ options: choices: ["yes", "no"] aliases: [] + update_cache: + description: + - Force updating the cache. Has an effect only if state is I(present) + or I(latest). 
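This option takes effect further down, where ensure() shells out to makecache before resolving packages; a reduced sketch of that flow, with subprocess standing in for module.run_command and illustrative base flags:

    import subprocess

    def refresh_metadata(update_cache, yum_basecmd=('yum', '-d', '2', '-y')):
        # Mirrors the module's behaviour: refresh the metadata cache only
        # when explicitly requested, on the present/latest code paths.
        if update_cache:
            subprocess.check_call(list(yum_basecmd) + ['makecache'])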
+ required: false + version_added: "1.9" + default: "no" + choices: ["yes", "no"] + aliases: [] + notes: [] # informational: requirements for nodes requirements: [ yum, rpm ] @@ -139,21 +149,13 @@ def log(msg): syslog.openlog('ansible-yum', 0, syslog.LOG_USER) syslog.syslog(syslog.LOG_NOTICE, msg) -def yum_base(conf_file=None, cachedir=False): +def yum_base(conf_file=None): my = yum.YumBase() my.preconf.debuglevel=0 my.preconf.errorlevel=0 if conf_file and os.path.exists(conf_file): my.preconf.fn = conf_file - if cachedir or os.geteuid() != 0: - if hasattr(my, 'setCacheDir'): - my.setCacheDir() - else: - cachedir = yum.misc.getCacheDir() - my.repos.setCacheDir(cachedir) - my.conf.cache = 0 - return my def install_yum_utils(module): @@ -746,6 +748,10 @@ def ensure(module, state, pkgspec, conf_file, enablerepo, disablerepo, yum_basecmd.extend(r_cmd) if state in ['installed', 'present', 'latest']: + + if module.params.get('update_cache'): + module.run_command(yum_basecmd + ['makecache']) + my = yum_base(conf_file) try: for r in dis_repos: @@ -803,6 +809,7 @@ def main(): list=dict(), conf_file=dict(default=None), disable_gpg_check=dict(required=False, default="no", type='bool'), + update_cache=dict(required=False, default="no", type='bool'), # this should not be needed, but exists as a failsafe install_repoquery=dict(required=False, default="yes", type='bool'), ), diff --git a/source_control/git.py b/source_control/git.py index a5d94e3dbbe..44ebf06487a 100644 --- a/source_control/git.py +++ b/source_control/git.py @@ -33,12 +33,12 @@ options: description: - git, SSH, or HTTP protocol address of the git repository. dest: - required: false + required: true description: - Absolute path of where the repository should be checked out to. - This parameter is required, unless C(update) is set to C(no) - This change was made in version 1.8. Prior to this version, the - C(dest) parameter was always required. + This parameter is required, unless C(clone) is set to C(no) + This change was made in version 1.8.3. Prior to this version, + the C(dest) parameter was always required. version: required: false default: "HEAD" @@ -80,6 +80,17 @@ options: default: "origin" description: - Name of the remote. + refspec: + required: false + default: null + version_added: "1.9" + description: + - Add an additional refspec to be fetched. + If version is set to a I(SHA-1) not reachable from any branch + or tag, this option may be necessary to specify the ref containing + the I(SHA-1). + Uses the same syntax as the 'git fetch' command. + An example value could be "refs/meta/config". force: required: false default: "yes" @@ -97,13 +108,20 @@ options: - Create a shallow clone with a history truncated to the specified number or revisions. The minimum possible value is C(1), otherwise ignored. + clone: + required: false + default: "yes" + choices: [ "yes", "no" ] + version_added: "1.9" + description: + - If C(no), do not clone the repository if it does not exist locally update: required: false default: "yes" choices: [ "yes", "no" ] version_added: "1.2" description: - - If C(no), just returns information about the repository without updating. + - If C(no), do not retrieve new revisions from the origin repository executable: required: false default: null @@ -128,6 +146,19 @@ options: description: - if C(no), repository will be cloned without the --recursive option, skipping sub-modules. 
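The refspec option added above is handed to git fetch verbatim; a minimal sketch of that extra fetch, reusing the pull-request refspec from the EXAMPLES below:

    import subprocess

    def fetch_extra_refspec(dest, remote='origin',
                            refspec='+refs/pull/*:refs/heads/*'):
        # Same syntax as 'git fetch <remote> <refspec>'; this particular
        # refspec maps every pull request ref onto a local branch.
        subprocess.check_call(['git', 'fetch', remote, refspec], cwd=dest)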
+ + track_submodules: + required: false + default: "no" + choices: ["yes", "no"] + version_added: "1.8" + description: + - if C(yes), submodules will track the latest commit on their + master branch (or other branch specified in .gitmodules). If + C(no), submodules will be kept at the revision specified by the + main project. This is equivalent to specifying the --remote flag + to git submodule update. + notes: - "If the task seems to be hanging, first verify remote host is in C(known_hosts). SSH will prompt user to authorize the first contact with a remote host. To avoid this prompt, @@ -146,6 +177,13 @@ EXAMPLES = ''' # Example just ensuring the repo checkout exists - git: repo=git://foosball.example.org/path/to/repo.git dest=/srv/checkout update=no + +# Example just get information about the repository whether or not it has +# already been cloned locally. +- git: repo=git://foosball.example.org/path/to/repo.git dest=/srv/checkout clone=no update=no + +# Example checkout a github repo and use refspec to fetch all pull requests +- git: repo=https://github.com/ansible/ansible-examples.git dest=/src/ansible-examples refspec=+refs/pull/*:refs/heads/* ''' import re @@ -236,8 +274,30 @@ def get_version(module, git_path, dest, ref="HEAD"): sha = stdout.rstrip('\n') return sha +def get_submodule_versions(git_path, module, dest, version='HEAD'): + cmd = [git_path, 'submodule', 'foreach', git_path, 'rev-parse', version] + (rc, out, err) = module.run_command(cmd, cwd=dest) + if rc != 0: + module.fail_json(msg='Unable to determine hashes of submodules') + submodules = {} + subm_name = None + for line in out.splitlines(): + if line.startswith("Entering '"): + subm_name = line[10:-1] + elif len(line.strip()) == 40: + if subm_name is None: + module.fail_json() + submodules[subm_name] = line.strip() + subm_name = None + else: + module.fail_json(msg='Unable to parse submodule hash line: %s' % line.strip()) + if subm_name is not None: + module.fail_json(msg='Unable to find hash for submodule: %s' % subm_name) + + return submodules + def clone(git_path, module, repo, dest, remote, depth, version, bare, - reference, recursive): + reference, refspec): ''' makes a new git repo if it does not already exist ''' dest_dirname = os.path.dirname(dest) try: @@ -249,8 +309,6 @@ def clone(git_path, module, repo, dest, remote, depth, version, bare, cmd.append('--bare') else: cmd.extend([ '--origin', remote ]) - if recursive: - cmd.extend([ '--recursive' ]) if is_remote_branch(git_path, module, dest, repo, version) \ or is_remote_tag(git_path, module, dest, repo, version): cmd.extend([ '--branch', version ]) @@ -263,7 +321,10 @@ def clone(git_path, module, repo, dest, remote, depth, version, bare, if bare: if remote != 'origin': module.run_command([git_path, 'remote', 'add', remote, repo], check_rc=True, cwd=dest) - + + if refspec: + module.run_command([git_path, 'fetch', remote, refspec], check_rc=True, cwd=dest) + def has_local_mods(module, git_path, dest, bare): if bare: return False @@ -287,6 +348,7 @@ def reset(git_path, module, dest): def get_remote_head(git_path, module, dest, version, remote, bare): cloning = False cwd = None + tag = False if remote == module.params['repo']: cloning = True else: @@ -301,7 +363,8 @@ def get_remote_head(git_path, module, dest, version, remote, bare): elif is_remote_branch(git_path, module, dest, remote, version): cmd = '%s ls-remote %s -h refs/heads/%s' % (git_path, remote, version) elif is_remote_tag(git_path, module, dest, remote, version): - cmd = '%s ls-remote %s -t 
refs/tags/%s' % (git_path, remote, version) + tag = True + cmd = '%s ls-remote %s -t refs/tags/%s*' % (git_path, remote, version) else: # appears to be a sha1. return as-is since it appears # cannot check for a specific sha1 on remote @@ -309,6 +372,16 @@ def get_remote_head(git_path, module, dest, version, remote, bare): (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=cwd) if len(out) < 1: module.fail_json(msg="Could not determine remote revision for %s" % version) + + if tag: + # Find the dereferenced tag if this is an annotated tag. + for tag in out.split('\n'): + if tag.endswith(version + '^{}'): + out = tag + break + elif tag.endswith(version): + out = tag + rev = out.split()[0] return rev @@ -399,28 +472,87 @@ def get_head_branch(git_path, module, dest, remote, bare=False): f.close() return branch -def fetch(git_path, module, repo, dest, version, remote, bare): +def fetch(git_path, module, repo, dest, version, remote, bare, refspec): ''' updates repo from remote sources ''' - (rc, out0, err0) = module.run_command([git_path, 'remote', 'set-url', remote, repo], cwd=dest) - if rc != 0: - module.fail_json(msg="Failed to set a new url %s for %s: %s" % (repo, remote, out0 + err0)) - if bare: - (rc, out1, err1) = module.run_command([git_path, 'fetch', remote, '+refs/heads/*:refs/heads/*'], cwd=dest) - else: - (rc, out1, err1) = module.run_command("%s fetch %s" % (git_path, remote), cwd=dest) - if rc != 0: - module.fail_json(msg="Failed to download remote objects and refs") + commands = [("set a new url %s for %s" % (repo, remote), [git_path, 'remote', 'set-url', remote, repo])] + + fetch_str = 'download remote objects and refs' if bare: - (rc, out2, err2) = module.run_command([git_path, 'fetch', remote, '+refs/tags/*:refs/tags/*'], cwd=dest) + refspecs = ['+refs/heads/*:refs/heads/*', '+refs/tags/*:refs/tags/*'] + if refspec: + refspecs.append(refspec) + commands.append((fetch_str, [git_path, 'fetch', remote] + refspecs)) else: - (rc, out2, err2) = module.run_command("%s fetch --tags %s" % (git_path, remote), cwd=dest) - if rc != 0: - module.fail_json(msg="Failed to download remote objects and refs") - (rc, out3, err3) = submodule_update(git_path, module, dest) - return (rc, out1 + out2 + out3, err1 + err2 + err3) + # unlike in bare mode, there's no way to combine the + # additional refspec with the default git fetch behavior, + # so use two commands + commands.append((fetch_str, [git_path, 'fetch', remote])) + refspecs = ['+refs/tags/*:refs/tags/*'] + if refspec: + refspecs.append(refspec) + commands.append((fetch_str, [git_path, 'fetch', remote] + refspecs)) + + for (label,command) in commands: + (rc,out,err) = module.run_command(command, cwd=dest) + if rc != 0: + module.fail_json(msg="Failed to %s: %s %s" % (label, out, err)) + +def submodules_fetch(git_path, module, remote, track_submodules, dest): + changed = False -def submodule_update(git_path, module, dest): + if not os.path.exists(os.path.join(dest, '.gitmodules')): + # no submodules + return changed + + gitmodules_file = open(os.path.join(dest, '.gitmodules'), 'r') + for line in gitmodules_file: + # Check for new submodules + if not changed and line.strip().startswith('path'): + path = line.split('=', 1)[1].strip() + # Check that dest/path/.git exists + if not os.path.exists(os.path.join(dest, path, '.git')): + changed = True + + # add the submodule repo's hostkey + if line.strip().startswith('url'): + repo = line.split('=', 1)[1].strip() + if module.params['ssh_opts'] is not None: + if not "-o 
StrictHostKeyChecking=no" in module.params['ssh_opts']: + add_git_host_key(module, repo, accept_hostkey=module.params['accept_hostkey']) + else: + add_git_host_key(module, repo, accept_hostkey=module.params['accept_hostkey']) + + # Check for updates to existing modules + if not changed: + # Fetch updates + begin = get_submodule_versions(git_path, module, dest) + cmd = [git_path, 'submodule', 'foreach', git_path, 'fetch'] + (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest) + if rc != 0: + module.fail_json(msg="Failed to fetch submodules: %s" % out + err) + + if track_submodules: + # Compare against submodule HEAD + ### FIXME: determine this from .gitmodules + version = 'master' + after = get_submodule_versions(git_path, module, dest, '%s/%s' + % (remote, version)) + if begin != after: + changed = True + else: + # Compare against the superproject's expectation + cmd = [git_path, 'submodule', 'status'] + (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest) + if rc != 0: + module.fail_json(msg='Failed to retrieve submodule status: %s' % out + err) + for line in out.splitlines(): + if line[0] != ' ': + changed = True + break + return changed + +def submodule_update(git_path, module, dest, track_submodules): ''' init and update any submodules ''' # get the valid submodule params @@ -431,7 +563,7 @@ def submodule_update(git_path, module, dest): return (0, '', '') cmd = [ git_path, 'submodule', 'sync' ] (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest) - if 'remote' in params: + if 'remote' in params and track_submodules: cmd = [ git_path, 'submodule', 'update', '--init', '--recursive' ,'--remote' ] else: cmd = [ git_path, 'submodule', 'update', '--init', '--recursive' ] @@ -440,8 +572,8 @@ def submodule_update(git_path, module, dest): module.fail_json(msg="Failed to init/update submodules: %s" % out + err) return (rc, out, err) -def switch_version(git_path, module, dest, remote, version, recursive): - ''' once pulled, switch to a particular SHA, tag, or branch ''' + +def switch_version(git_path, module, dest, remote, version): cmd = '' if version != 'HEAD': if is_remote_branch(git_path, module, dest, remote, version): @@ -466,10 +598,6 @@ def switch_version(git_path, module, dest, remote, version, recursive): module.fail_json(msg="Failed to checkout %s" % (version)) else: module.fail_json(msg="Failed to checkout branch %s" % (branch)) - if recursive: - (rc, out2, err2) = submodule_update(git_path, module, dest) - out1 += out2 - err1 += err1 return (rc, out1, err1) # =========================================== @@ -481,9 +609,11 @@ def main(): repo=dict(required=True, aliases=['name']), version=dict(default='HEAD'), remote=dict(default='origin'), + refspec=dict(default=None), reference=dict(default=None), force=dict(default='yes', type='bool'), depth=dict(default=None, type='int'), + clone=dict(default='yes', type='bool'), update=dict(default='yes', type='bool'), accept_hostkey=dict(default='no', type='bool'), key_file=dict(default=None, required=False), @@ -491,6 +621,7 @@ def main(): executable=dict(default=None), bare=dict(default='no', type='bool'), recursive=dict(default='yes', type='bool'), + track_submodules=dict(default='no', type='bool'), ), supports_check_mode=True ) @@ -499,9 +630,11 @@ def main(): repo = module.params['repo'] version = module.params['version'] remote = module.params['remote'] + refspec = module.params['refspec'] force = module.params['force'] depth = module.params['depth'] update = module.params['update'] + allow_clone = 
module.params['clone'] bare = module.params['bare'] reference = module.params['reference'] git_path = module.params['executable'] or module.get_bin_path('git', True) @@ -509,8 +642,8 @@ def main(): ssh_opts = module.params['ssh_opts'] gitconfig = None - if not dest and update: - module.fail_json(msg="the destination directory must be specified unless update=no") + if not dest and allow_clone: + module.fail_json(msg="the destination directory must be specified unless clone=no") elif dest: dest = os.path.abspath(os.path.expanduser(dest)) if bare: @@ -518,6 +651,10 @@ def main(): else: gitconfig = os.path.join(dest, '.git', 'config') + # make sure the key_file path is expanded for ~ and $HOME + if key_file is not None: + key_file = os.path.abspath(os.path.expanduser(key_file)) + # create a wrapper script and export # GIT_SSH= as an environment variable # for git to use the wrapper script @@ -531,24 +668,28 @@ def main(): if module.params['ssh_opts'] is not None: if not "-o StrictHostKeyChecking=no" in module.params['ssh_opts']: add_git_host_key(module, repo, accept_hostkey=module.params['accept_hostkey']) - else: + else: add_git_host_key(module, repo, accept_hostkey=module.params['accept_hostkey']) recursive = module.params['recursive'] + track_submodules = module.params['track_submodules'] rc, out, err, status = (0, None, None, None) before = None local_mods = False - if gitconfig and not os.path.exists(gitconfig) or not gitconfig and not update: - # if there is no git configuration, do a clone operation unless the - # user requested no updates or we're doing a check mode test (in - # which case we do a ls-remote), otherwise clone the repo - if module.check_mode or not update: + repo_updated = None + if (dest and not os.path.exists(gitconfig)) or (not dest and not allow_clone): + # if there is no git configuration, do a clone operation unless: + # * the user requested no clone (they just want info) + # * we're doing a check mode test + # In those cases we do an ls-remote + if module.check_mode or not allow_clone: remote_head = get_remote_head(git_path, module, dest, version, repo, bare) module.exit_json(changed=True, before=before, after=remote_head) # there's no git config, so clone - clone(git_path, module, repo, dest, remote, depth, version, bare, reference, recursive) + clone(git_path, module, repo, dest, remote, depth, version, bare, reference, refspec) + repo_updated = True elif not update: # Just return having found a repo already in the dest path # this does no checking that the repo is the actual repo @@ -575,28 +716,49 @@ def main(): elif is_remote_tag(git_path, module, dest, repo, version): # if the remote is a tag and we have the tag locally, exit early if version in get_tags(git_path, module, dest): - module.exit_json(changed=False, before=before, after=remote_head) + repo_updated = False else: - module.exit_json(changed=False, before=before, after=remote_head) - if module.check_mode: - module.exit_json(changed=True, before=before, after=remote_head) - fetch(git_path, module, repo, dest, version, remote, bare) + repo_updated = False + if repo_updated is None: + if module.check_mode: + module.exit_json(changed=True, before=before, after=remote_head) + fetch(git_path, module, repo, dest, version, remote, bare, refspec) + repo_updated = True # switch to version specified regardless of whether - # we cloned or pulled + # we got new revisions from the repository if not bare: - switch_version(git_path, module, dest, remote, version, recursive) + switch_version(git_path, module, dest, 
remote, version) + + # Deal with submodules + submodules_updated = False + if recursive and not bare: + submodules_updated = submodules_fetch(git_path, module, remote, track_submodules, dest) + + if module.check_mode: + if submodules_updated: + module.exit_json(changed=True, before=before, after=remote_head, submodules_changed=True) + else: + module.exit_json(changed=False, before=before, after=remote_head) + + if submodules_updated: + # Switch to version specified + submodule_update(git_path, module, dest, track_submodules) # determine if we changed anything after = get_version(module, git_path, dest) - changed = False - if before != after or local_mods: + changed = False + if before != after or local_mods or submodules_updated: changed = True # cleanup the wrapper script if ssh_wrapper: - os.remove(ssh_wrapper) + try: + os.remove(ssh_wrapper) + except OSError: + # No need to fail if the file already doesn't exist + pass module.exit_json(changed=changed, before=before, after=after) diff --git a/source_control/hg.py b/source_control/hg.py index 1b95bcd5ac3..c2bd0d9d953 100644 --- a/source_control/hg.py +++ b/source_control/hg.py @@ -2,6 +2,7 @@ #-*- coding: utf-8 -*- # (c) 2013, Yeukhon Wong +# (c) 2014, Nate Coraor # # This module was originally inspired by Brad Olson's ansible-module-mercurial # . This module tends @@ -49,7 +50,7 @@ options: - Equivalent C(-r) option in hg command which could be the changeset, revision number, branch name or even tag. required: false - default: "default" + default: null aliases: [ version ] force: description: @@ -128,7 +129,10 @@ class Hg(object): if not before: return False - (rc, out, err) = self._command(['update', '-C', '-R', self.dest]) + args = ['update', '-C', '-R', self.dest] + if self.revision is not None: + args = args + ['-r', self.revision] + (rc, out, err) = self._command(args) if rc != 0: self.module.fail_json(msg=err) @@ -170,13 +174,30 @@ class Hg(object): ['pull', '-R', self.dest, self.repo]) def update(self): + if self.revision is not None: + return self._command(['update', '-r', self.revision, '-R', self.dest]) return self._command(['update', '-R', self.dest]) def clone(self): - return self._command(['clone', self.repo, self.dest, '-r', self.revision]) + if self.revision is not None: + return self._command(['clone', self.repo, self.dest, '-r', self.revision]) + return self._command(['clone', self.repo, self.dest]) - def switch_version(self): - return self._command(['update', '-r', self.revision, '-R', self.dest]) + @property + def at_revision(self): + """ + There is no point in pulling from a potentially down/slow remote site + if the desired changeset is already the current changeset. 
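+        """

The early exit this docstring describes prefix-matches the requested revision against the full working-copy changeset id; a standalone sketch under the same assumptions (repository path illustrative):

    import subprocess

    def at_revision(dest, revision):
        # Anything shorter than 7 hex chars could be a rev number, tag or
        # branch name, which cannot be prefix-matched; pull in that case.
        if revision is None or len(revision) < 7:
            return False
        out = subprocess.check_output(
            ['hg', '--debug', 'id', '-i', '-R', dest]).decode().strip()
        return out.startswith(revision)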
+ """ + if self.revision is None or len(self.revision) < 7: + # Assume it's a rev number, tag, or branch + return False + (rc, out, err) = self._command(['--debug', 'id', '-i', '-R', self.dest]) + if rc != 0: + self.module.fail_json(msg=err) + if out.startswith(self.revision): + return True + return False # =========================================== @@ -185,7 +206,7 @@ def main(): argument_spec = dict( repo = dict(required=True, aliases=['name']), dest = dict(required=True), - revision = dict(default="default", aliases=['version']), + revision = dict(default=None, aliases=['version']), force = dict(default='yes', type='bool'), purge = dict(default='no', type='bool'), executable = dict(default=None), @@ -212,6 +233,12 @@ def main(): (rc, out, err) = hg.clone() if rc != 0: module.fail_json(msg=err) + elif hg.at_revision: + # no update needed, don't pull + before = hg.get_revision() + + # but force and purge if desired + cleaned = hg.cleanup(force, purge) else: # get the current state before doing pulling before = hg.get_revision() @@ -227,7 +254,6 @@ def main(): if rc != 0: module.fail_json(msg=err) - hg.switch_version() after = hg.get_revision() if before != after or cleaned: changed = True diff --git a/system/authorized_key.py b/system/authorized_key.py index f964113127e..d5792200b8d 100644 --- a/system/authorized_key.py +++ b/system/authorized_key.py @@ -333,7 +333,7 @@ def enforce_state(module, params): state = params.get("state", "present") key_options = params.get("key_options", None) - # extract indivial keys into an array, skipping blank lines and comments + # extract individual keys into an array, skipping blank lines and comments key = [s for s in key.splitlines() if s and not s.startswith('#')] diff --git a/system/cron.py b/system/cron.py index d14f36253c0..c0a39b61c61 100644 --- a/system/cron.py +++ b/system/cron.py @@ -123,8 +123,8 @@ updates: [ 'Mike Grozak', 'Patrick Callahan' ] EXAMPLES = ''' # Ensure a job that runs at 2 and 5 exists. -# Creates an entry like "* 5,2 * * ls -alh > /dev/null" -- cron: name="check dirs" hour="5,2" job="ls -alh > /dev/null" +# Creates an entry like "0 5,2 * * ls -alh > /dev/null" +- cron: name="check dirs" minute="0" hour="5,2" job="ls -alh > /dev/null" # Ensure an old job is no longer present. Removes any job that is prefixed # by "#Ansible: an old job" from the crontab diff --git a/system/hostname.py b/system/hostname.py old mode 100755 new mode 100644 index a426b59136b..f645a8cdfd3 --- a/system/hostname.py +++ b/system/hostname.py @@ -28,6 +28,7 @@ requirements: [ hostname ] description: - Set system's hostname - Currently implemented on Debian, Ubuntu, Fedora, RedHat, openSUSE, Linaro, ScientificLinux, Arch, CentOS, AMI. 
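The SystemdStrategy introduced below drives hostnamectl, which works on any systemd-based distribution; a minimal equivalent of its core call (the full strategy also updates the transient and static names separately):

    import subprocess

    def set_hostname_systemd(name):
        # hostnamectl talks to systemd-hostnamed, so this works regardless
        # of distribution as long as systemd is PID 1.
        subprocess.check_call(['hostnamectl', 'set-hostname', name])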
+ - Any distribution that uses systemd as their init system options: name: required: true @@ -45,18 +46,6 @@ from distutils.version import LooseVersion from ansible.module_utils.basic import * -# wrap get_distribution_version in case it returns a string -def _get_distribution_version(): - distribution_version = get_distribution_version() - - if type(distribution_version) is str: - distribution_version = 0 - elif type(distribution_version) is None: - distribution_version = 0 - - return distribution_version - - class UnimplementedStrategy(object): def __init__(self, module): self.module = module @@ -244,9 +233,9 @@ class RedHatStrategy(GenericStrategy): # =========================================== -class FedoraStrategy(GenericStrategy): +class SystemdStrategy(GenericStrategy): """ - This is a Fedora family Hostname manipulation strategy class - it uses + This is a Systemd hostname manipulation strategy class - it uses the hostnamectl command. """ @@ -298,48 +287,54 @@ class OpenRCStrategy(GenericStrategy): def get_permanent_hostname(self): try: - with open(self.HOSTNAME_FILE, 'r') as f: + try: + f = open(self.HOSTNAME_FILE, 'r') for line in f: line = line.strip() if line.startswith('hostname='): return line[10:].strip('"') - return None - except Exception, err: - self.module.fail_json(msg="failed to read hostname: %s" % - str(err)) + except Exception, err: + self.module.fail_json(msg="failed to read hostname: %s" % str(err)) + finally: + f.close() + + return None def set_permanent_hostname(self, name): try: - with open(self.HOSTNAME_FILE, 'r') as f: + try: + f = open(self.HOSTNAME_FILE, 'r') lines = [x.strip() for x in f] - for i, line in enumerate(lines): - if line.startswith('hostname='): - lines[i] = 'hostname="%s"' % name - break + for i, line in enumerate(lines): + if line.startswith('hostname='): + lines[i] = 'hostname="%s"' % name + break + f.close() - with open(self.HOSTNAME_FILE, 'w') as f: + f = open(self.HOSTNAME_FILE, 'w') f.write('\n'.join(lines) + '\n') - except Exception, err: - self.module.fail_json(msg="failed to update hostname: %s" % - str(err)) + except Exception, err: + self.module.fail_json(msg="failed to update hostname: %s" % str(err)) + finally: + f.close() # =========================================== class FedoraHostname(Hostname): platform = 'Linux' distribution = 'Fedora' - strategy_class = FedoraStrategy + strategy_class = SystemdStrategy class OpenSUSEHostname(Hostname): platform = 'Linux' distribution = 'Opensuse ' - strategy_class = FedoraStrategy + strategy_class = SystemdStrategy class ArchHostname(Hostname): platform = 'Linux' distribution = 'Arch' - strategy_class = FedoraStrategy + strategy_class = SystemdStrategy class RedHat5Hostname(Hostname): platform = 'Linux' @@ -349,48 +344,56 @@ class RedHat5Hostname(Hostname): class RedHatServerHostname(Hostname): platform = 'Linux' distribution = 'Red hat enterprise linux server' - distribution_version = _get_distribution_version() + distribution_version = get_distribution_version() if distribution_version and LooseVersion(distribution_version) >= LooseVersion("7"): - strategy_class = FedoraStrategy + strategy_class = SystemdStrategy else: strategy_class = RedHatStrategy class RedHatWorkstationHostname(Hostname): platform = 'Linux' distribution = 'Red hat enterprise linux workstation' - distribution_version = _get_distribution_version() + distribution_version = get_distribution_version() if distribution_version and LooseVersion(distribution_version) >= LooseVersion("7"): - strategy_class = FedoraStrategy + 
strategy_class = SystemdStrategy else: strategy_class = RedHatStrategy class CentOSHostname(Hostname): platform = 'Linux' distribution = 'Centos' - distribution_version = _get_distribution_version() + distribution_version = get_distribution_version() if distribution_version and LooseVersion(distribution_version) >= LooseVersion("7"): - strategy_class = FedoraStrategy + strategy_class = SystemdStrategy else: strategy_class = RedHatStrategy class CentOSLinuxHostname(Hostname): platform = 'Linux' distribution = 'Centos linux' - distribution_version = _get_distribution_version() + distribution_version = get_distribution_version() if distribution_version and LooseVersion(distribution_version) >= LooseVersion("7"): - strategy_class = FedoraStrategy + strategy_class = SystemdStrategy else: strategy_class = RedHatStrategy class ScientificHostname(Hostname): platform = 'Linux' distribution = 'Scientific' - strategy_class = RedHatStrategy + distribution_version = get_distribution_version() + if distribution_version and LooseVersion(distribution_version) >= LooseVersion("7"): + strategy_class = SystemdStrategy + else: + strategy_class = RedHatStrategy class ScientificLinuxHostname(Hostname): platform = 'Linux' distribution = 'Scientific linux' - strategy_class = RedHatStrategy + distribution_version = get_distribution_version() + if distribution_version and LooseVersion(distribution_version) >= LooseVersion("7"): + strategy_class = SystemdStrategy + else: + strategy_class = RedHatStrategy class AmazonLinuxHostname(Hostname): platform = 'Linux' diff --git a/system/mount.py b/system/mount.py old mode 100755 new mode 100644 diff --git a/system/selinux.py b/system/selinux.py index 53e53d1d49c..908bbc250ec 100644 --- a/system/selinux.py +++ b/system/selinux.py @@ -174,14 +174,19 @@ def main(): if (state != runtime_state): if module.check_mode: module.exit_json(changed=True) - if (state == 'disabled'): - msgs.append('state change will take effect next reboot') - else: - if (runtime_enabled): + if (runtime_enabled): + if (state == 'disabled'): + if (runtime_state != 'permissive'): + # Temporarily set state to permissive + set_state('permissive') + msgs.append('runtime state temporarily changed from \'%s\' to \'permissive\', state change will take effect next reboot' % (runtime_state)) + else: + msgs.append('state change will take effect next reboot') + else: set_state(state) msgs.append('runtime state changed from \'%s\' to \'%s\'' % (runtime_state, state)) - else: - msgs.append('state change will take effect next reboot') + else: + msgs.append('state change will take effect next reboot') changed=True if (state != config_state): diff --git a/system/service.py b/system/service.py index b235ee25c57..46cd3dba569 100644 --- a/system/service.py +++ b/system/service.py @@ -25,7 +25,8 @@ author: Michael DeHaan version_added: "0.1" short_description: Manage services. description: - - Controls services on remote hosts. + - Controls services on remote hosts. Supported init systems include BSD init, + OpenRC, SysV, systemd, upstart. 
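Of these, systemd is detected by inspecting PID 1 directly, as the rewritten check_systemd() in this diff shows; the same check as a standalone function:

    def init_is_systemd():
        # /proc/1/comm reports the real command name of PID 1 even when
        # /sbin/init is a symlink to systemd, which cmdline would hide.
        try:
            with open('/proc/1/comm') as f:
                return 'systemd' in f.read()
        except IOError:
            # No /proc/1/comm: kernel too old to be running systemd.
            return False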
options: name: required: true @@ -393,74 +394,43 @@ class LinuxService(Service): for binary in binaries: location[binary] = self.module.get_bin_path(binary) - def check_systemd(name): - # verify service is managed by systemd - if not location.get('systemctl', None): - return False - - # default to .service if the unit type is not specified - if name.find('.') > 0: - unit_name, unit_type = name.rsplit('.', 1) - if unit_type not in ("service", "socket", "device", "mount", "automount", - "swap", "target", "path", "timer", "snapshot"): - name = "%s.service" % name - else: - name = "%s.service" % name + for initdir in initpaths: + initscript = "%s/%s" % (initdir,self.name) + if os.path.isfile(initscript): + self.svc_initscript = initscript - rc, out, err = self.execute_command("%s list-unit-files" % (location['systemctl'])) + def check_systemd(): + # verify systemd is installed (by finding systemctl) + if not location.get('systemctl', False): + return False - # adjust the service name to account for template service unit files - index = name.find('@') - if index != -1: - template_name = name[:index+1] - else: - template_name = name + systemd_enabled = False + # Check if init is the systemd command, using comm as cmdline could be symlink + try: + f = open('/proc/1/comm', 'r') + except IOError, err: + # If comm doesn't exist, old kernel, no systemd + return False - self.__systemd_unit = None - for line in out.splitlines(): - if line.startswith(template_name): - self.__systemd_unit = name + for line in f: + if 'systemd' in line: return True + return False - # Locate a tool for enable options - if location.get('chkconfig', None) and os.path.exists("/etc/init.d/%s" % self.name): - if check_systemd(self.name): - # service is managed by systemd - self.enable_cmd = location['systemctl'] - else: - # we are using a standard SysV service - self.enable_cmd = location['chkconfig'] - elif location.get('update-rc.d', None): - if check_systemd(self.name): - # service is managed by systemd - self.enable_cmd = location['systemctl'] - elif location['initctl'] and os.path.exists("/etc/init/%s.conf" % self.name): - # service is managed by upstart - self.enable_cmd = location['initctl'] - elif location['update-rc.d'] and os.path.exists("/etc/init.d/%s" % self.name): - # service is managed by with SysV init scripts, but with update-rc.d - self.enable_cmd = location['update-rc.d'] - else: - self.module.fail_json(msg="service not found: %s" % self.name) - elif location.get('rc-service', None) and not location.get('systemctl', None): - # service is managed by OpenRC - self.svc_cmd = location['rc-service'] - self.enable_cmd = location['rc-update'] - return - elif check_systemd(self.name): + # Locate a tool to enable/disable a service + if location.get('systemctl',False) and check_systemd(): # service is managed by systemd + self.__systemd_unit = self.name + self.svc_cmd = location['systemctl'] self.enable_cmd = location['systemctl'] - elif location['initctl'] and os.path.exists("/etc/init/%s.conf" % self.name): + + elif location.get('initctl', False) and os.path.exists("/etc/init/%s.conf" % self.name): # service is managed by upstart self.enable_cmd = location['initctl'] - - # if this service is managed via upstart, get the current upstart version - if self.enable_cmd == location['initctl']: - # default the upstart version to something we can compare against + # set the upstart version based on the output of 'initctl version' self.upstart_version = LooseVersion('0.0.0') try: - # set the upstart version based on the 
output of 'initctl version' version_re = re.compile(r'\(upstart (.*)\)') rc,stdout,stderr = self.module.run_command('initctl version') if rc == 0: @@ -468,40 +438,72 @@ class LinuxService(Service): if res: self.upstart_version = LooseVersion(res.groups()[0]) except: - # we'll use the default of 0.0.0 since we couldn't - # detect the current upstart version above - pass + pass # we'll use the default of 0.0.0 - # Locate a tool for runtime service management (start, stop etc.) - if location.get('service', None) and os.path.exists("/etc/init.d/%s" % self.name): - # SysV init script - self.svc_cmd = location['service'] - elif location.get('start', None) and os.path.exists("/etc/init/%s.conf" % self.name): - # upstart -- rather than being managed by one command, start/stop/restart are actual commands - self.svc_cmd = '' - else: - # still a SysV init script, but /sbin/service isn't installed - for initdir in initpaths: - initscript = "%s/%s" % (initdir,self.name) - if os.path.isfile(initscript): - self.svc_initscript = initscript + if location.get('start', False): + # upstart -- rather than being managed by one command, start/stop/restart are actual commands + self.svc_cmd = '' + + elif location.get('rc-service', False): + # service is managed by OpenRC + self.svc_cmd = location['rc-service'] + self.enable_cmd = location['rc-update'] + return # already have service start/stop tool too! - # couldn't find anything yet, assume systemd - if self.svc_cmd is None and self.svc_initscript is None: - if location.get('systemctl'): - self.svc_cmd = location['systemctl'] + elif self.svc_initscript: + # service is managed by with SysV init scripts + if location.get('update-rc.d', False): + # and uses update-rc.d + self.enable_cmd = location['update-rc.d'] + elif location.get('chkconfig', False): + # and uses chkconfig + self.enable_cmd = location['chkconfig'] + + if self.enable_cmd is None: + self.module.fail_json(msg="no service or tool found for: %s" % self.name) + # If no service control tool selected yet, try to see if 'service' is available + if not self.svc_cmd and location.get('service', False): + self.svc_cmd = location['service'] + + # couldn't find anything yet if self.svc_cmd is None and not self.svc_initscript: self.module.fail_json(msg='cannot find \'service\' binary or init script for service, possible typo in service name?, aborting') - if location.get('initctl', None): + if location.get('initctl', False): self.svc_initctl = location['initctl'] def get_systemd_status_dict(self): (rc, out, err) = self.execute_command("%s show %s" % (self.enable_cmd, self.__systemd_unit,)) if rc != 0: self.module.fail_json(msg='failure %d running systemctl show for %r: %s' % (rc, self.__systemd_unit, err)) - return dict(line.split('=', 1) for line in out.splitlines()) + key = None + value_buffer = [] + status_dict = {} + for line in out.splitlines(): + if not key: + key, value = line.split('=', 1) + # systemd fields that are shell commands can be multi-line + # We take a value that begins with a "{" as the start of + # a shell command and a line that ends with "}" as the end of + # the command + if value.lstrip().startswith('{'): + if value.rstrip().endswith('}'): + status_dict[key] = value + key = None + else: + value_buffer.append(value) + else: + status_dict[key] = value + key = None + else: + if line.rstrip().endswith('}'): + status_dict[key] = '\n'.join(value_buffer) + key = None + else: + value_buffer.append(value) + + return status_dict def get_systemd_service_status(self): d = 
self.get_systemd_status_dict() @@ -593,10 +595,6 @@ class LinuxService(Service): self.changed = True action = None - # FIXME: we use chkconfig or systemctl - # to decide whether to run the command here but need something - # similar for upstart - # # Upstart's initctl # @@ -796,9 +794,9 @@ class LinuxService(Service): (rc, out, err) = self.execute_command("%s %s %s" % args) if rc != 0: if err: - self.module.fail_json(msg=err) + self.module.fail_json(msg="Error when trying to %s %s: rc=%s %s" % (action, self.name, rc, err)) else: - self.module.fail_json(msg=out) + self.module.fail_json(msg="Failure for %s %s: rc=%s %s" % (action, self.name, rc, out)) return (rc, out, err) @@ -944,34 +942,118 @@ class FreeBsdService(Service): class OpenBsdService(Service): """ - This is the OpenBSD Service manipulation class - it uses /etc/rc.d for - service control. Enabling a service is currently not supported because the - _flags variable is not boolean, you should supply a rc.conf.local - file in some other way. + This is the OpenBSD Service manipulation class - it uses rcctl(8) or + /etc/rc.d scripts for service control. Enabling a service is + only supported if rcctl is present. """ platform = 'OpenBSD' distribution = None def get_service_tools(self): - rcdir = '/etc/rc.d' + self.enable_cmd = self.module.get_bin_path('rcctl') + + if self.enable_cmd: + self.svc_cmd = self.enable_cmd + else: + rcdir = '/etc/rc.d' - rc_script = "%s/%s" % (rcdir, self.name) - if os.path.isfile(rc_script): - self.svc_cmd = rc_script + rc_script = "%s/%s" % (rcdir, self.name) + if os.path.isfile(rc_script): + self.svc_cmd = rc_script if not self.svc_cmd: - self.module.fail_json(msg='unable to find rc.d script') + self.module.fail_json(msg='unable to find svc_cmd') def get_service_status(self): - rc, stdout, stderr = self.execute_command("%s %s" % (self.svc_cmd, 'check')) + if self.enable_cmd: + rc, stdout, stderr = self.execute_command("%s %s %s" % (self.svc_cmd, 'check', self.name)) + else: + rc, stdout, stderr = self.execute_command("%s %s" % (self.svc_cmd, 'check')) + + if stderr: + self.module.fail_json(msg=stderr) + if rc == 1: self.running = False elif rc == 0: self.running = True def service_control(self): - return self.execute_command("%s %s" % (self.svc_cmd, self.action)) + if self.enable_cmd: + return self.execute_command("%s -f %s %s" % (self.svc_cmd, self.action, self.name)) + else: + return self.execute_command("%s -f %s" % (self.svc_cmd, self.action)) + + def service_enable(self): + if not self.enable_cmd: + return super(OpenBsdService, self).service_enable() + + rc, stdout, stderr = self.execute_command("%s %s %s" % (self.enable_cmd, 'default', self.name)) + + if stderr: + self.module.fail_json(msg=stderr) + + default_string = stdout.rstrip() + + # Depending on the service the string returned from 'default' may be + # either a set of flags or the boolean YES/NO + if default_string == "YES" or default_string == "NO": + default_flags = '' + else: + default_flags = default_string + + rc, stdout, stderr = self.execute_command("%s %s %s" % (self.enable_cmd, 'status', self.name)) + + if stderr: + self.module.fail_json(msg=stderr) + + status_string = stdout.rstrip() + + # Depending on the service the string returned from 'status' may be + # either a set of flags or the boolean YES/NO + if status_string == "YES" or status_string == "NO": + current_flags = '' + else: + current_flags = status_string + + # If there are arguments from the user we use these as flags unless + # they are already set. 
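The decision spelled out in these comments and the branches that follow condenses to a small pure function; an illustrative standalone version:

    def pick_flags(arguments, current_flags, default_flags):
        # User-supplied arguments win when they differ from what is set;
        # with no arguments, drift from the defaults is reset (' ');
        # otherwise nothing needs to change ('').
        if arguments and arguments != current_flags:
            return arguments
        if not arguments and current_flags != default_flags:
            return ' '
        return ''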
+ if self.arguments and self.arguments != current_flags: + changed_flags = self.arguments + # If the user has not supplied any arguments and the current flags + # differ from the default we reset them. + elif not self.arguments and current_flags != default_flags: + changed_flags = ' ' + # Otherwise there is no need to modify flags. + else: + changed_flags = '' + + if self.enable: + if rc == 0 and not changed_flags: + return + + action = "enable %s" % (self.name) + if changed_flags: + action = action + " flags %s" % (changed_flags) + else: + if rc == 1: + return + + action = "disable %s" % self.name + + if self.module.check_mode: + self.module.exit_json(changed=True, msg="changing service enablement") + + rc, stdout, stderr = self.execute_command("%s %s" % (self.enable_cmd, action)) + + if rc != 0: + if stderr: + self.module.fail_json(msg=stderr) + else: + self.module.fail_json(msg="rcctl failed to modify service enablement") + + self.changed = True # =========================================== # Subclass: NetBSD diff --git a/system/sysctl.py b/system/sysctl.py index acf6395f071..4517c724ca9 100644 --- a/system/sysctl.py +++ b/system/sysctl.py @@ -84,7 +84,7 @@ EXAMPLES = ''' # Set kernel.panic to 3 in /tmp/test_sysctl.conf - sysctl: name=kernel.panic value=3 sysctl_file=/tmp/test_sysctl.conf reload=no -# Set ip fowarding on in /proc and do not reload the sysctl file +# Set ip forwarding on in /proc and do not reload the sysctl file - sysctl: name="net.ipv4.ip_forward" value=1 sysctl_set=yes # Set ip forwarding on in /proc and in the sysctl file and reload if necessary @@ -185,12 +185,20 @@ class SysctlModule(object): def _parse_value(self, value): if value is None: return '' - elif value.lower() in BOOLEANS_TRUE: - return '1' - elif value.lower() in BOOLEANS_FALSE: - return '0' + elif isinstance(value, bool): + if value: + return '1' + else: + return '0' + elif isinstance(value, basestring): + if value.lower() in BOOLEANS_TRUE: + return '1' + elif value.lower() in BOOLEANS_FALSE: + return '0' + else: + return value.strip() else: - return value.strip() + return value # ============================================================== # SYSCTL COMMAND MANAGEMENT @@ -278,10 +286,10 @@ class SysctlModule(object): checked.append(k) if k == self.args['name']: if self.args['state'] == "present": - new_line = "%s = %s\n" % (k, self.args['value']) + new_line = "%s=%s\n" % (k, self.args['value']) self.fixed_lines.append(new_line) else: - new_line = "%s = %s\n" % (k, v) + new_line = "%s=%s\n" % (k, v) self.fixed_lines.append(new_line) if self.args['name'] not in checked and self.args['state'] == "present": diff --git a/system/user.py b/system/user.py index 551384a7a67..d4227630382 100644 --- a/system/user.py +++ b/system/user.py @@ -153,13 +153,14 @@ options: present on target host. ssh_key_file: required: false - default: $HOME/.ssh/id_rsa + default: .ssh/id_rsa version_added: "0.9" description: - - Optionally specify the SSH key filename. + - Optionally specify the SSH key filename. If this is a relative + filename then it will be relative to the user's home directory. ssh_key_comment: required: false - default: ansible-generated + default: ansible-generated on $HOSTNAME version_added: "0.9" description: - Optionally define the comment for the SSH key. 
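The per-host default shown here is computed once at module load from the managed host's name; the one-liner behind it, matching the ssh_defaults change later in this diff:

    import socket

    # e.g. "ansible-generated on web01.example.com" (hostname illustrative)
    ssh_key_comment = 'ansible-generated on %s' % socket.gethostname()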
@@ -189,8 +190,8 @@ EXAMPLES = ''' # Remove the user 'johnd' - user: name=johnd state=absent remove=yes -# Create a 2048-bit SSH key for user jsmith -- user: name=jsmith generate_ssh_key=yes ssh_key_bits=2048 +# Create a 2048-bit SSH key for user jsmith in ~jsmith/.ssh/id_rsa +- user: name=jsmith generate_ssh_key=yes ssh_key_bits=2048 ssh_key_file=.ssh/id_rsa ''' import os @@ -198,6 +199,7 @@ import pwd import grp import syslog import platform +import socket try: import spwd @@ -262,12 +264,12 @@ class User(object): # select whether we dump additional debug info through syslog self.syslogging = False - def execute_command(self, cmd): + def execute_command(self, cmd, use_unsafe_shell=False, data=None): if self.syslogging: syslog.openlog('ansible-%s' % os.path.basename(__file__)) syslog.syslog(syslog.LOG_NOTICE, 'Command %s' % '|'.join(cmd)) - return self.module.run_command(cmd) + return self.module.run_command(cmd, use_unsafe_shell=use_unsafe_shell, data=data) def remove_user_userdel(self): cmd = [self.module.get_bin_path('userdel', True)] @@ -299,7 +301,15 @@ class User(object): # exists with the same name as the user to prevent # errors from useradd trying to create a group when # USERGROUPS_ENAB is set in /etc/login.defs. - cmd.append('-N') + if os.path.exists('/etc/redhat-release'): + dist = platform.dist() + major_release = int(dist[1].split('.')[0]) + if major_release <= 5: + cmd.append('-n') + else: + cmd.append('-N') + else: + cmd.append('-N') if self.groups is not None and len(self.groups): groups = self.get_groups_set() @@ -439,21 +449,23 @@ class User(object): def group_exists(self,group): try: - if group.isdigit(): - if grp.getgrgid(int(group)): - return True - else: - if grp.getgrnam(group): - return True - except KeyError: - return False + # Try group as a gid first + grp.getgrgid(int(group)) + return True + except (ValueError, KeyError): + try: + grp.getgrnam(group) + return True + except KeyError: + return False - def group_info(self,group): + def group_info(self, group): if not self.group_exists(group): return False - if group.isdigit(): - return list(grp.getgrgid(group)) - else: + try: + # Try group as a gid first + return list(grp.getgrgid(int(group))) + except (ValueError, KeyError): return list(grp.getgrnam(group)) def get_groups_set(self, remove_existing=True): @@ -1245,7 +1257,7 @@ class SunOS(User): cmd.append('-G') new_groups = groups if self.append: - new_groups.extend(current_groups) + new_groups.update(current_groups) cmd.append(','.join(new_groups)) if self.comment is not None and info[4] != self.comment: @@ -1357,11 +1369,10 @@ class AIX(User): # set password with chpasswd if self.password is not None: cmd = [] - cmd.append('echo "'+self.name+':'+self.password+'" |') cmd.append(self.module.get_bin_path('chpasswd', True)) cmd.append('-e') cmd.append('-c') - self.execute_command(' '.join(cmd)) + self.execute_command(' '.join(cmd), data="%s:%s" % (self.name, self.password)) return (rc, out, err) @@ -1433,11 +1444,10 @@ class AIX(User): # set password with chpasswd if self.update_password == 'always' and self.password is not None and info[1] != self.password: cmd = [] - cmd.append('echo "'+self.name+':'+self.password+'" |') cmd.append(self.module.get_bin_path('chpasswd', True)) cmd.append('-e') cmd.append('-c') - (rc2, out2, err2) = self.execute_command(' '.join(cmd)) + (rc2, out2, err2) = self.execute_command(' '.join(cmd), data="%s:%s" % (self.name, self.password)) else: (rc2, out2, err2) = (None, '', '') @@ -1453,7 +1463,7 @@ def main(): 'bits': '2048', 'type': 
'rsa', 'passphrase': None, - 'comment': 'ansible-generated' + 'comment': 'ansible-generated on %s' % socket.gethostname() } module = AnsibleModule( argument_spec = dict( diff --git a/utilities/helper/__init__.py b/utilities/helper/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/utilities/accelerate.py b/utilities/helper/accelerate.py similarity index 100% rename from utilities/accelerate.py rename to utilities/helper/accelerate.py diff --git a/utilities/fireball.py b/utilities/helper/fireball.py similarity index 100% rename from utilities/fireball.py rename to utilities/helper/fireball.py diff --git a/utilities/logic/__init__.py b/utilities/logic/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/utilities/assert.py b/utilities/logic/assert.py similarity index 100% rename from utilities/assert.py rename to utilities/logic/assert.py diff --git a/internal/async_status.py b/utilities/logic/async_status.py similarity index 100% rename from internal/async_status.py rename to utilities/logic/async_status.py diff --git a/internal/async_wrapper.py b/utilities/logic/async_wrapper.py similarity index 100% rename from internal/async_wrapper.py rename to utilities/logic/async_wrapper.py diff --git a/utilities/debug.py b/utilities/logic/debug.py similarity index 100% rename from utilities/debug.py rename to utilities/logic/debug.py diff --git a/utilities/fail.py b/utilities/logic/fail.py similarity index 100% rename from utilities/fail.py rename to utilities/logic/fail.py diff --git a/utilities/include_vars.py b/utilities/logic/include_vars.py similarity index 100% rename from utilities/include_vars.py rename to utilities/logic/include_vars.py diff --git a/utilities/pause.py b/utilities/logic/pause.py similarity index 100% rename from utilities/pause.py rename to utilities/logic/pause.py diff --git a/utilities/set_fact.py b/utilities/logic/set_fact.py similarity index 100% rename from utilities/set_fact.py rename to utilities/logic/set_fact.py diff --git a/utilities/wait_for.py b/utilities/logic/wait_for.py similarity index 97% rename from utilities/wait_for.py rename to utilities/logic/wait_for.py index 5e02712ddff..ae316fe1a17 100644 --- a/utilities/wait_for.py +++ b/utilities/logic/wait_for.py @@ -170,7 +170,7 @@ class TCPConnectionInfo(object): def _get_exclude_ips(self): if self.module.params['exclude_hosts'] is None: return [] - exclude_hosts = self.module.params['exclude_hosts'].split(',') + exclude_hosts = self.module.params['exclude_hosts'] return [ _convert_host_to_hex(h)[1] for h in exclude_hosts ] def get_active_connections_count(self): @@ -221,14 +221,14 @@ class LinuxTCPConnectionInfo(TCPConnectionInfo): def _get_exclude_ips(self): if self.module.params['exclude_hosts'] is None: return [] - exclude_hosts = self.module.params['exclude_hosts'].split(',') + exclude_hosts = self.module.params['exclude_hosts'] return [ _convert_host_to_hex(h) for h in exclude_hosts ] def get_active_connections_count(self): active_connections = 0 f = open(self.source_file[self.family]) for tcp_connection in f.readlines(): - tcp_connection = tcp_connection.strip().split(' ') + tcp_connection = tcp_connection.strip().split() if tcp_connection[self.local_address_field] == 'local_address': continue if tcp_connection[self.connection_state_field] not in self.connection_states: @@ -322,20 +322,16 @@ def main(): state = params['state'] path = params['path'] search_regex = params['search_regex'] - if params['exclude_hosts']: - exclude_hosts = 
params['exclude_hosts'].split(',') - else: - exclude_hosts = [] - + if port and path: module.fail_json(msg="port and path parameter can not both be passed to wait_for") if path and state == 'stopped': module.fail_json(msg="state=stopped should only be used for checking a port in the wait_for module") if path and state == 'drained': module.fail_json(msg="state=drained should only be used for checking a port in the wait_for module") - if exclude_hosts and state != 'drained': + if params['exclude_hosts'] is not None and state != 'drained': module.fail_json(msg="exclude_hosts should only be with state=drained") - + start = datetime.datetime.now() if delay: diff --git a/web_infrastructure/apache2_module.py b/web_infrastructure/apache2_module.py index 39351482087..bd6de56aed2 100644 --- a/web_infrastructure/apache2_module.py +++ b/web_infrastructure/apache2_module.py @@ -49,6 +49,9 @@ import re def _disable_module(module): name = module.params['name'] a2dismod_binary = module.get_bin_path("a2dismod") + if a2dismod_binary is None: + module.fail_json(msg="a2dismod not found. Perhaps this system does not use a2dismod to manage apache") + result, stdout, stderr = module.run_command("%s %s" % (a2dismod_binary, name)) if re.match(r'.*' + name + r' already disabled.*', stdout, re.S): @@ -61,6 +64,9 @@ def _disable_module(module): def _enable_module(module): name = module.params['name'] a2enmod_binary = module.get_bin_path("a2enmod") + if a2enmod_binary is None: + module.fail_json(msg="a2enmod not found. Perhaps this system does not use a2enmod to manage apache") + result, stdout, stderr = module.run_command("%s %s" % (a2enmod_binary, name)) if re.match(r'.*' + name + r' already enabled.*', stdout, re.S): @@ -86,4 +92,5 @@ def main(): # import module snippets from ansible.module_utils.basic import * -main() +if __name__ == '__main__': + main() diff --git a/web_infrastructure/django_manage.py b/web_infrastructure/django_manage.py index 580cc63c2dd..3e34a6388c0 100644 --- a/web_infrastructure/django_manage.py +++ b/web_infrastructure/django_manage.py @@ -170,7 +170,7 @@ def main(): syncdb=('database', ), test=('failfast', 'testrunner', 'liveserver', 'apps', ), validate=(), - migrate=('apps', 'skip', 'merge'), + migrate=('apps', 'skip', 'merge', 'database',), collectstatic=('link', ), ) diff --git a/web_infrastructure/supervisorctl.py b/web_infrastructure/supervisorctl.py index 2d458169e76..f75992b9a6a 100644 --- a/web_infrastructure/supervisorctl.py +++ b/web_infrastructure/supervisorctl.py @@ -210,10 +210,10 @@ def main(): module.fail_json(msg=out, name=name, state=state) if state == 'started': - take_action_on_processes(processes, lambda s: s != 'RUNNING', 'start', 'started') + take_action_on_processes(processes, lambda s: s not in ('RUNNING', 'STARTING'), 'start', 'started') if state == 'stopped': - take_action_on_processes(processes, lambda s: s == 'RUNNING', 'stop', 'stopped') + take_action_on_processes(processes, lambda s: s in ('RUNNING', 'STARTING'), 'stop', 'stopped') # import module snippets from ansible.module_utils.basic import * diff --git a/windows/win_feature.ps1 b/windows/win_feature.ps1 index a0776a4bf1a..a54007b47bf 100644 --- a/windows/win_feature.ps1 +++ b/windows/win_feature.ps1 @@ -23,7 +23,7 @@ Import-Module Servermanager; $params = Parse-Args $args; -$result = New-Object psobject @{ +$result = New-Object PSObject -Property @{ changed = $false } @@ -70,19 +70,33 @@ Else $includemanagementtools = $false } - - If ($state -eq "present") { try { - $featureresult = Add-WindowsFeature 
-Name $name -Restart:$restart -IncludeAllSubFeature:$includesubfeatures -IncludeManagementTools:$includemanagementtools + If (Get-Command "Install-WindowsFeature" -ErrorAction SilentlyContinue) { + $featureresult = Install-WindowsFeature -Name $name -Restart:$restart -IncludeAllSubFeature:$includesubfeatures -IncludeManagementTools:$includemanagementtools -ErrorAction SilentlyContinue + } + ElseIf (Get-Command "Add-WindowsFeature" -ErrorAction SilentlyContinue) { + $featureresult = Add-WindowsFeature -Name $name -Restart:$restart -IncludeAllSubFeature:$includesubfeatures -ErrorAction SilentlyContinue + } + Else { + Fail-Json $result "Not supported on this version of Windows" + } } catch { Fail-Json $result $_.Exception.Message } } -Elseif ($state -eq "absent") { +ElseIf ($state -eq "absent") { try { - $featureresult = Remove-WindowsFeature -Name $name -Restart:$restart + If (Get-Command "Uninstall-WindowsFeature" -ErrorAction SilentlyContinue) { + $featureresult = Uninstall-WindowsFeature -Name $name -Restart:$restart -ErrorAction SilentlyContinue + } + ElseIf (Get-Command "Remove-WindowsFeature" -ErrorAction SilentlyContinue) { + $featureresult = Remove-WindowsFeature -Name $name -Restart:$restart -ErrorAction SilentlyContinue + } + Else { + Fail-Json $result "Not supported on this version of Windows" + } } catch { Fail-Json $result $_.Exception.Message @@ -93,30 +107,40 @@ Elseif ($state -eq "absent") { # each role/feature that is installed/removed $installed_features = @() #$featureresult.featureresult is filled if anything was changed -if ($featureresult.FeatureResult) +If ($featureresult.FeatureResult) { ForEach ($item in $featureresult.FeatureResult) { - $installed_features += New-Object psobject @{ - id = $item.id.ToString() + $message = @() + ForEach ($msg in $item.Message) { + $message += New-Object PSObject -Property @{ + message_type = $msg.MessageType.ToString() + error_code = $msg.ErrorCode + text = $msg.Text + } + } + $installed_features += New-Object PSObject -Property @{ + id = $item.Id display_name = $item.DisplayName - message = $item.Message.ToString() - restart_needed = $item.RestartNeeded.ToString() + message = $message + restart_needed = $item.RestartNeeded.ToString() | ConvertTo-Bool skip_reason = $item.SkipReason.ToString() - success = $item.Success.ToString() + success = $item.Success.ToString() | ConvertTo-Bool } } - Set-Attr $result "feature_result" $installed_features - - $result.changed = $true } -Else -{ - Set-Attr $result "feature_result" $null -} -Set-Attr $result "feature_success" $featureresult.Success.ToString() -Set-Attr $result "feature_exitcode" $featureresult.ExitCode.ToString() -Set-Attr $result "feature_restart_needed" $featureresult.RestartNeeded.ToString() +Set-Attr $result "feature_result" $installed_features +Set-Attr $result "success" ($featureresult.Success.ToString() | ConvertTo-Bool) +Set-Attr $result "exitcode" $featureresult.ExitCode.ToString() +Set-Attr $result "restart_needed" ($featureresult.RestartNeeded.ToString() | ConvertTo-Bool) -Exit-Json $result; +If ($result.success) { + Exit-Json $result +} +ElseIf ($state -eq "present") { + Fail-Json $result "Failed to add feature" +} +Else { + Fail-Json $result "Failed to remove feature" +} diff --git a/windows/win_user.ps1 b/windows/win_user.ps1 index 306d7a0db2f..ae4847a8528 100644 --- a/windows/win_user.ps1 +++ b/windows/win_user.ps1 @@ -20,6 +20,9 @@ # POWERSHELL_COMMON ######## +$ADS_UF_PASSWD_CANT_CHANGE = 64 +$ADS_UF_DONT_EXPIRE_PASSWD = 65536 + $adsi = 
[ADSI]"WinNT://$env:COMPUTERNAME" function Get-User($user) { @@ -27,22 +30,23 @@ function Get-User($user) { return } -function Create-User([string]$user, [string]$passwd) { - $adsiuser = $adsi.Create("User", $user) - $adsiuser.SetPassword($passwd) - $adsiuser.SetInfo() - $adsiuser - return +function Get-UserFlag($user, $flag) { + If ($user.UserFlags[0] -band $flag) { + $true + } + Else { + $false + } } -function Update-Password($user, [string]$passwd) { - $user.SetPassword($passwd) - $user.SetInfo() +function Set-UserFlag($user, $flag) { + $user.UserFlags = ($user.UserFlags[0] -BOR $flag) } -function Delete-User($user) { - $adsi.delete("user", $user.Name.Value) +function Clear-UserFlag($user, $flag) { + $user.UserFlags = ($user.UserFlags[0] -BXOR $flag) } + ######## $params = Parse-Args $args; @@ -51,56 +55,194 @@ $result = New-Object psobject @{ changed = $false }; -If (-not $params.name.GetType) -{ +If (-not $params.name.GetType) { Fail-Json $result "missing required arguments: name" } +$username = Get-Attr $params "name" +$fullname = Get-Attr $params "fullname" +$description = Get-Attr $params "description" +$password = Get-Attr $params "password" + If ($params.state) { $state = $params.state.ToString().ToLower() - If (($state -ne 'present') -and ($state -ne 'absent')) { - Fail-Json $result "state is '$state'; must be 'present' or 'absent'" + If (($state -ne 'present') -and ($state -ne 'absent') -and ($state -ne 'query')) { + Fail-Json $result "state is '$state'; must be 'present', 'absent' or 'query'" } } -Elseif (!$params.state) { +ElseIf (!$params.state) { $state = "present" } -If ((-not $params.password.GetType) -and ($state -eq 'present')) -{ - Fail-Json $result "missing required arguments: password" +If ($params.update_password) { + $update_password = $params.update_password.ToString().ToLower() + If (($update_password -ne 'always') -and ($update_password -ne 'on_create')) { + Fail-Json $result "update_password is '$update_password'; must be 'always' or 'on_create'" + } +} +ElseIf (!$params.update_password) { + $update_password = "always" } -$username = Get-Attr $params "name" -$password = Get-Attr $params "password" +$password_expired = Get-Attr $params "password_expired" $null +If ($password_expired -ne $null) { + $password_expired = $password_expired | ConvertTo-Bool +} + +$password_never_expires = Get-Attr $params "password_never_expires" $null +If ($password_never_expires -ne $null) { + $password_never_expires = $password_never_expires | ConvertTo-Bool +} + +$user_cannot_change_password = Get-Attr $params "user_cannot_change_password" $null +If ($user_cannot_change_password -ne $null) { + $user_cannot_change_password = $user_cannot_change_password | ConvertTo-Bool +} + +$account_disabled = Get-Attr $params "account_disabled" $null +If ($account_disabled -ne $null) { + $account_disabled = $account_disabled | ConvertTo-Bool +} + +$account_locked = Get-Attr $params "account_locked" $null +If ($account_locked -ne $null) { + $account_locked = $account_locked | ConvertTo-Bool + if ($account_locked) { + Fail-Json $result "account_locked must be set to 'no' if provided" + } +} + +$groups = Get-Attr $params "groups" $null +If ($groups -ne $null) { + If ($groups.GetType().Name -eq "String") { + [string[]]$groups = $groups.Split(",") + } + ElseIf ($groups.GetType().Name -ne "Object[]") { + Fail-Json $result "groups must be a string or array" + } + $groups = $groups | ForEach { ([string]$_).Trim() } | Where { $_ } + If ($groups -eq $null) { + $groups = @() + } +} + +If 
($params.groups_action) { + $groups_action = $params.groups_action.ToString().ToLower() + If (($groups_action -ne 'replace') -and ($groups_action -ne 'add') -and ($groups_action -ne 'remove')) { + Fail-Json $result "groups_action is '$groups_action'; must be 'replace', 'add' or 'remove'" + } +} +ElseIf (!$params.groups_action) { + $groups_action = "replace" +} $user_obj = Get-User $username -if ($state -eq 'present') { +If ($state -eq 'present') { # Add or update user try { - if ($user_obj.GetType) { - Update-Password $user_obj $password + If (!$user_obj.GetType) { + $user_obj = $adsi.Create("User", $username) + If ($password -ne $null) { + $user_obj.SetPassword($password) + } + $result.changed = $true + } + ElseIf (($password -ne $null) -and ($update_password -eq 'always')) { + [void][system.reflection.assembly]::LoadWithPartialName('System.DirectoryServices.AccountManagement') + $host_name = [System.Net.Dns]::GetHostName() + $pc = New-Object -TypeName System.DirectoryServices.AccountManagement.PrincipalContext 'Machine', $host_name + # ValidateCredentials fails if PasswordExpired == 1 + If (!$pc.ValidateCredentials($username, $password)) { + $user_obj.SetPassword($password) + $result.changed = $true + } + } + If (($fullname -ne $null) -and ($fullname -ne $user_obj.FullName[0])) { + $user_obj.FullName = $fullname + $result.changed = $true + } + If (($description -ne $null) -and ($description -ne $user_obj.Description[0])) { + $user_obj.Description = $description + $result.changed = $true + } + If (($password_expired -ne $null) -and ($password_expired -ne ($user_obj.PasswordExpired | ConvertTo-Bool))) { + $user_obj.PasswordExpired = If ($password_expired) { 1 } Else { 0 } + $result.changed = $true + } + If (($password_never_expires -ne $null) -and ($password_never_expires -ne (Get-UserFlag $user_obj $ADS_UF_DONT_EXPIRE_PASSWD))) { + If ($password_never_expires) { + Set-UserFlag $user_obj $ADS_UF_DONT_EXPIRE_PASSWD + } + Else { + Clear-UserFlag $user_obj $ADS_UF_DONT_EXPIRE_PASSWD + } + $result.changed = $true } - else { - Create-User $username $password + If (($user_cannot_change_password -ne $null) -and ($user_cannot_change_password -ne (Get-UserFlag $user_obj $ADS_UF_PASSWD_CANT_CHANGE))) { + If ($user_cannot_change_password) { + Set-UserFlag $user_obj $ADS_UF_PASSWD_CANT_CHANGE + } + Else { + Clear-UserFlag $user_obj $ADS_UF_PASSWD_CANT_CHANGE + } + $result.changed = $true + } + If (($account_disabled -ne $null) -and ($account_disabled -ne $user_obj.AccountDisabled)) { + $user_obj.AccountDisabled = $account_disabled + $result.changed = $true + } + If (($account_locked -ne $null) -and ($account_locked -ne $user_obj.IsAccountLocked)) { + $user_obj.IsAccountLocked = $account_locked + $result.changed = $true + } + If ($result.changed) { + $user_obj.SetInfo() + } + If ($groups.GetType) { + [string[]]$current_groups = $user_obj.Groups() | ForEach { $_.GetType().InvokeMember("Name", "GetProperty", $null, $_, $null) } + If (($groups_action -eq "remove") -or ($groups_action -eq "replace")) { + ForEach ($grp in $current_groups) { + If ((($groups_action -eq "remove") -and ($groups -contains $grp)) -or (($groups_action -eq "replace") -and ($groups -notcontains $grp))) { + $group_obj = $adsi.Children | where { $_.SchemaClassName -eq 'Group' -and $_.Name -eq $grp } + If ($group_obj.GetType) { + $group_obj.Remove($user_obj.Path) + $result.changed = $true + } + Else { + Fail-Json $result "group '$grp' not found" + } + } + } + } + If (($groups_action -eq "add") -or ($groups_action -eq "replace")) { + 
ForEach ($grp in $groups) { + If ($current_groups -notcontains $grp) { + $group_obj = $adsi.Children | where { $_.SchemaClassName -eq 'Group' -and $_.Name -eq $grp } + If ($group_obj.GetType) { + $group_obj.Add($user_obj.Path) + $result.changed = $true + } + Else { + Fail-Json $result "group '$grp' not found" + } + } + } + } } - $result.changed = $true - $user_obj = Get-User $username } catch { Fail-Json $result $_.Exception.Message } } -else { +ElseIf ($state -eq 'absent') { # Remove user try { - if ($user_obj.GetType) { - Delete-User $user_obj + If ($user_obj.GetType) { + $username = $user_obj.Name.Value + $adsi.delete("User", $user_obj.Name.Value) $result.changed = $true - } - else { - Set-Attr $result "msg" "User '$username' was not found" + $user_obj = $null } } catch { @@ -108,9 +250,38 @@ else { } } -# Set-Attr $result "user" $user_obj -Set-Attr $result "user_name" $user_obj.Name -Set-Attr $result "user_fullname" $user_obj.FullName -Set-Attr $result "user_path" $user_obj.Path +try { + If ($user_obj.GetType) { + $user_obj.RefreshCache() + Set-Attr $result "name" $user_obj.Name[0] + Set-Attr $result "fullname" $user_obj.FullName[0] + Set-Attr $result "path" $user_obj.Path + Set-Attr $result "description" $user_obj.Description[0] + Set-Attr $result "password_expired" ($user_obj.PasswordExpired | ConvertTo-Bool) + Set-Attr $result "password_never_expires" (Get-UserFlag $user_obj $ADS_UF_DONT_EXPIRE_PASSWD) + Set-Attr $result "user_cannot_change_password" (Get-UserFlag $user_obj $ADS_UF_PASSWD_CANT_CHANGE) + Set-Attr $result "account_disabled" $user_obj.AccountDisabled + Set-Attr $result "account_locked" $user_obj.IsAccountLocked + Set-Attr $result "sid" (New-Object System.Security.Principal.SecurityIdentifier($user_obj.ObjectSid.Value, 0)).Value + $user_groups = @() + ForEach ($grp in $user_obj.Groups()) { + $group_result = New-Object psobject @{ + name = $grp.GetType().InvokeMember("Name", "GetProperty", $null, $grp, $null) + path = $grp.GetType().InvokeMember("ADsPath", "GetProperty", $null, $grp, $null) + } + $user_groups += $group_result; + } + Set-Attr $result "groups" $user_groups + Set-Attr $result "state" "present" + } + Else { + Set-Attr $result "name" $username + Set-Attr $result "msg" "User '$username' was not found" + Set-Attr $result "state" "absent" + } +} +catch { + Fail-Json $result $_.Exception.Message +} -Exit-Json $result; +Exit-Json $result diff --git a/windows/win_user.py b/windows/win_user.py index e2da6a1ddb8..82bcf0897ec 100644 --- a/windows/win_user.py +++ b/windows/win_user.py @@ -31,32 +31,109 @@ description: options: name: description: - - Username of the user to manage + - Name of the user to create, remove or modify. required: true + fullname: + description: + - Full name of the user + required: false default: null - aliases: [] + version_added: "1.9" + description: + description: + - Description of the user + required: false + default: null + version_added: "1.9" password: description: - - Password for the user (plain text) - required: true + - Optionally set the user's password to this (plain text) value. + required: false default: null - aliases: [] + update_password: + description: + - C(always) will update passwords if they differ. C(on_create) will + only set the password for newly created users. + required: false + choices: [ 'always', 'on_create' ] + default: always + version_added: "1.9" + password_expired: + description: + - C(yes) will require the user to change their password at next login. + C(no) will clear the expired password flag. 
+ required: false + choices: [ 'yes', 'no' ] + default: null + version_added: "1.9" + password_never_expires: + description: + - C(yes) will set the password to never expire. C(no) will allow the + password to expire. + required: false + choices: [ 'yes', 'no' ] + default: null + version_added: "1.9" + user_cannot_change_password: + description: + - C(yes) will prevent the user from changing their password. C(no) will + allow the user to change their password. + required: false + choices: [ 'yes', 'no' ] + default: null + version_added: "1.9" + account_disabled: + description: + - C(yes) will disable the user account. C(no) will clear the disabled + flag. + required: false + choices: [ 'yes', 'no' ] + default: null + version_added: "1.9" + account_locked: + description: + - C(no) will unlock the user account if locked. + required: false + choices: [ 'no' ] + default: null + version_added: "1.9" + groups: + description: + - Adds or removes the user from this comma-separated list of groups, + depending on the value of I(groups_action). When I(groups_action) is + C(replace) and I(groups) is set to the empty string ('groups='), the + user is removed from all groups. + required: false + version_added: "1.9" + groups_action: + description: + - If C(replace), the user is added as a member of each group in + I(groups) and removed from any other groups. If C(add), the user is + added to each group in I(groups) where not already a member. If + C(remove), the user is removed from each group in I(groups). + required: false + choices: [ "replace", "add", "remove" ] + default: "replace" + version_added: "1.9" state: description: - - Whether to create or delete a user + - When C(present), creates or updates the user account. When C(absent), + removes the user account if it exists. When C(query) (new in 1.9), + retrieves the user account details without making any changes. required: false choices: - present - absent + - query default: present aliases: [] -author: Paul Durivage +author: Paul Durivage / Chris Church ''' EXAMPLES = ''' # Ad-hoc example -$ ansible -i hosts -m win_user -a "name=bob password=Password12345" all -$ ansible -i hosts -m win_user -a "name=bob password=Password12345 state=absent" all +$ ansible -i hosts -m win_user -a "name=bob password=Password12345 groups=Users" all +$ ansible -i hosts -m win_user -a "name=bob state=absent" all # Playbook example --- @@ -68,4 +145,5 @@ $ ansible -i hosts -m win_user -a "name=bob password=Password12345 state=absent" win_user: name: ansible password: "@ns1bl3" + groups: ["Users"] '''
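The wait_for change above stops splitting exclude_hosts inside the module and relies on the framework to deliver it as a list, so playbooks keep passing a comma-separated string (or a YAML list) exactly as before. A minimal sketch of the drained-state path this touches; the address, port and excluded hosts are illustrative:

- name: wait until active connections to port 8000 are drained, ignoring two load balancers
  wait_for:
    host: 0.0.0.0
    port: 8000
    state: drained
    exclude_hosts: 10.2.1.2,10.2.1.3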
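Similarly, with the a2enmod/a2dismod lookups now failing cleanly instead of crashing when the helper binary is missing, apache2_module stays a one-liner on Debian-style systems; the module name is illustrative:

- name: enable mod_rewrite
  apache2_module:
    name: rewrite
    state: present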
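Since django_manage's migrate command now accepts the database option (as syncdb already did), a migration can target a specific database alias; the path and alias are illustrative:

- name: apply migrations against the 'analytics' database alias
  django_manage:
    command: migrate
    app_path: /srv/myapp
    database: analytics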
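Because supervisorctl's started state now also counts processes in the STARTING state (and stopped stops them), a task like the following no longer re-issues start while a program is still spinning up; the program name is illustrative:

- name: ensure my_app is running under supervisor
  supervisorctl:
    name: my_app
    state: started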
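The win_feature rework prefers Install-WindowsFeature/Uninstall-WindowsFeature and falls back to Add-WindowsFeature/Remove-WindowsFeature on older hosts, so playbook usage is unchanged on either path; note that the fallback omits -IncludeManagementTools, so management tools are only included where Install-WindowsFeature exists. A sketch, with the feature name illustrative:

- name: install the Web-Server role with its management tools
  win_feature:
    name: Web-Server
    state: present
    restart: no
    include_sub_features: yes
    include_management_tools: yes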
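Pulling the new win_user options together, a playbook sketch exercising several of the 1.9 additions documented above; the account, password and group names are illustrative:

- name: create a service account with a non-expiring password in two groups
  win_user:
    name: svc_deploy
    fullname: Deployment service account
    password: "S3cr3tP@ss"
    update_password: on_create
    password_never_expires: yes
    groups: "Deployers,Remote Desktop Users"
    groups_action: add
    state: present

The same account can be inspected without changes via state: query, which returns the name, groups, SID and flag attributes that the script sets on the result object.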