fixing merge conflict

reviewable/pr18780/r1
Nathaniel Felsen 10 years ago
commit 2e74b17fad

@ -0,0 +1 @@
2.0.0-0.3.beta1

@ -241,7 +241,7 @@ def main():
stack_name=dict(required=True), stack_name=dict(required=True),
template_parameters=dict(required=False, type='dict', default={}), template_parameters=dict(required=False, type='dict', default={}),
state=dict(default='present', choices=['present', 'absent']), state=dict(default='present', choices=['present', 'absent']),
template=dict(default=None, required=False), template=dict(default=None, required=False, type='path'),
notification_arns=dict(default=None, required=False), notification_arns=dict(default=None, required=False),
stack_policy=dict(default=None, required=False), stack_policy=dict(default=None, required=False),
disable_rollback=dict(default=False, type='bool'), disable_rollback=dict(default=False, type='bool'),
@ -368,6 +368,16 @@ def main():
for output in stack.outputs: for output in stack.outputs:
stack_outputs[output.key] = output.value stack_outputs[output.key] = output.value
result['stack_outputs'] = stack_outputs result['stack_outputs'] = stack_outputs
stack_resources = []
for res in cfn.list_stack_resources(stack_name):
stack_resources.append({
"last_updated_time": res.last_updated_time,
"logical_resource_id": res.logical_resource_id,
"physical_resource_id": res.physical_resource_id,
"status": res.resource_status,
"status_reason": res.resource_status_reason,
"resource_type": res.resource_type })
result['stack_resources'] = stack_resources
# absent state is different because of the way delete_stack works. # absent state is different because of the way delete_stack works.
# problem is it it doesn't give an error if stack isn't found # problem is it it doesn't give an error if stack isn't found

@ -216,7 +216,7 @@ options:
volumes: volumes:
version_added: "1.5" version_added: "1.5"
description: description:
- "a list of volume dicts, each containing device name and optionally ephemeral id or snapshot id. Size and type (and number of iops for io device type) must be specified for a new volume or a root volume, and may be passed for a snapshot volume. For any volume, a volume size less than 1 will be interpreted as a request not to create the volume. Encrypt the volume by passing 'encrypted: true' in the volume dict." - a list of hash/dictionaries of volumes to add to the new instance; '[{"key":"value", "key":"value"}]'; keys allowed are - device_name (str; required), delete_on_termination (bool; False), device_type (deprecated), ephemeral (str), encrypted (bool; False), snapshot (str), volume_type (str), iops (int) - device_type is deprecated use volume_type, iops must be set when volume_type='io1', ephemeral and snapshot are mutually exclusive.
required: false required: false
default: null default: null
aliases: [] aliases: []
@ -295,7 +295,7 @@ EXAMPLES = '''
volumes: volumes:
- device_name: /dev/sdb - device_name: /dev/sdb
snapshot: snap-abcdef12 snapshot: snap-abcdef12
device_type: io1 volume_type: io1
iops: 1000 iops: 1000
volume_size: 100 volume_size: 100
delete_on_termination: true delete_on_termination: true
@ -710,11 +710,21 @@ def create_block_device(module, ec2, volume):
# Not aware of a way to determine this programatically # Not aware of a way to determine this programatically
# http://aws.amazon.com/about-aws/whats-new/2013/10/09/ebs-provisioned-iops-maximum-iops-gb-ratio-increased-to-30-1/ # http://aws.amazon.com/about-aws/whats-new/2013/10/09/ebs-provisioned-iops-maximum-iops-gb-ratio-increased-to-30-1/
MAX_IOPS_TO_SIZE_RATIO = 30 MAX_IOPS_TO_SIZE_RATIO = 30
# device_type has been used historically to represent volume_type,
# however ec2_vol uses volume_type, as does the BlockDeviceType, so
# we add handling for either/or but not both
if all(key in volume for key in ['device_type','volume_type']):
module.fail_json(msg = 'device_type is a deprecated name for volume_type. Do not use both device_type and volume_type')
# get whichever one is set, or NoneType if neither are set
volume_type = volume.get('device_type') or volume.get('volume_type')
if 'snapshot' not in volume and 'ephemeral' not in volume: if 'snapshot' not in volume and 'ephemeral' not in volume:
if 'volume_size' not in volume: if 'volume_size' not in volume:
module.fail_json(msg = 'Size must be specified when creating a new volume or modifying the root volume') module.fail_json(msg = 'Size must be specified when creating a new volume or modifying the root volume')
if 'snapshot' in volume: if 'snapshot' in volume:
if 'device_type' in volume and volume.get('device_type') == 'io1' and 'iops' not in volume: if volume_type == 'io1' and 'iops' not in volume:
module.fail_json(msg = 'io1 volumes must have an iops value set') module.fail_json(msg = 'io1 volumes must have an iops value set')
if 'iops' in volume: if 'iops' in volume:
snapshot = ec2.get_all_snapshots(snapshot_ids=[volume['snapshot']])[0] snapshot = ec2.get_all_snapshots(snapshot_ids=[volume['snapshot']])[0]
@ -729,10 +739,11 @@ def create_block_device(module, ec2, volume):
return BlockDeviceType(snapshot_id=volume.get('snapshot'), return BlockDeviceType(snapshot_id=volume.get('snapshot'),
ephemeral_name=volume.get('ephemeral'), ephemeral_name=volume.get('ephemeral'),
size=volume.get('volume_size'), size=volume.get('volume_size'),
volume_type=volume.get('device_type'), volume_type=volume_type,
delete_on_termination=volume.get('delete_on_termination', False), delete_on_termination=volume.get('delete_on_termination', False),
iops=volume.get('iops'), iops=volume.get('iops'),
encrypted=volume.get('encrypted', None)) encrypted=volume.get('encrypted', None))
def boto_supports_param_in_spot_request(ec2, param): def boto_supports_param_in_spot_request(ec2, param):
""" """
Check if Boto library has a <param> in its request_spot_instances() method. For example, the placement_group parameter wasn't added until 2.3.0. Check if Boto library has a <param> in its request_spot_instances() method. For example, the placement_group parameter wasn't added until 2.3.0.
@ -1215,8 +1226,12 @@ def startstop_instances(module, ec2, instance_ids, state, instance_tags):
wait = module.params.get('wait') wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout')) wait_timeout = int(module.params.get('wait_timeout'))
source_dest_check = module.params.get('source_dest_check')
termination_protection = module.params.get('termination_protection')
changed = False changed = False
instance_dict_array = [] instance_dict_array = []
source_dest_check = module.params.get('source_dest_check')
termination_protection = module.params.get('termination_protection')
if not isinstance(instance_ids, list) or len(instance_ids) < 1: if not isinstance(instance_ids, list) or len(instance_ids) < 1:
# Fail unless the user defined instance tags # Fail unless the user defined instance tags

@ -47,12 +47,6 @@ options:
- create or deregister/delete image - create or deregister/delete image
required: false required: false
default: 'present' default: 'present'
region:
description:
- The AWS region to use. Must be specified if ec2_url is not used. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: false
default: null
aliases: [ 'aws_region', 'ec2_region' ]
description: description:
description: description:
- An optional human-readable string describing the contents and purpose of the AMI. - An optional human-readable string describing the contents and purpose of the AMI.
@ -72,7 +66,8 @@ options:
device_mapping: device_mapping:
version_added: "2.0" version_added: "2.0"
description: description:
- An optional list of devices with custom configurations (same block-device-mapping parameters) - An optional list of device hashes/dictionaries with custom configurations (same block-device-mapping parameters)
- "Valid properties include: device_name, volume_type, size (in GB), delete_on_termination (boolean), no_device (boolean), snapshot_id, iops (for io1 volume_type)"
required: false required: false
default: null default: null
delete_snapshot: delete_snapshot:
@ -88,7 +83,9 @@ options:
version_added: "2.0" version_added: "2.0"
author: "Evan Duffield (@scicoin-project) <eduffield@iacquire.com>" author: "Evan Duffield (@scicoin-project) <eduffield@iacquire.com>"
extends_documentation_fragment: aws extends_documentation_fragment:
- aws
- ec2
''' '''
# Thank you to iAcquire for sponsoring development of this module. # Thank you to iAcquire for sponsoring development of this module.
@ -133,6 +130,21 @@ EXAMPLES = '''
volume_type: gp2 volume_type: gp2
register: instance register: instance
# AMI Creation, excluding a volume attached at /dev/sdb
- ec2_ami
aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx
aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
instance_id: i-xxxxxx
name: newtest
device_mapping:
- device_name: /dev/sda1
size: XXX
delete_on_termination: true
volume_type: gp2
- device_name: /dev/sdb
no_device: yes
register: instance
# Deregister/Delete AMI # Deregister/Delete AMI
- ec2_ami: - ec2_ami:
aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx

@ -80,11 +80,6 @@ options:
required: false required: false
version_added: "1.8" version_added: "1.8"
default: True default: True
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: false
aliases: ['aws_region', 'ec2_region']
vpc_zone_identifier: vpc_zone_identifier:
description: description:
- List of VPC subnets to use - List of VPC subnets to use
@ -134,7 +129,9 @@ options:
default: Default default: Default
choices: ['OldestInstance', 'NewestInstance', 'OldestLaunchConfiguration', 'ClosestToNextInstanceHour', 'Default'] choices: ['OldestInstance', 'NewestInstance', 'OldestLaunchConfiguration', 'ClosestToNextInstanceHour', 'Default']
version_added: "2.0" version_added: "2.0"
extends_documentation_fragment: aws extends_documentation_fragment:
- aws
- ec2
""" """
EXAMPLES = ''' EXAMPLES = '''
@ -258,9 +255,10 @@ def get_properties(autoscaling_group):
properties['viable_instances'] = 0 properties['viable_instances'] = 0
properties['terminating_instances'] = 0 properties['terminating_instances'] = 0
instance_facts = {}
if autoscaling_group.instances: if autoscaling_group.instances:
properties['instances'] = [i.instance_id for i in autoscaling_group.instances] properties['instances'] = [i.instance_id for i in autoscaling_group.instances]
instance_facts = {}
for i in autoscaling_group.instances: for i in autoscaling_group.instances:
instance_facts[i.instance_id] = {'health_status': i.health_status, instance_facts[i.instance_id] = {'health_status': i.health_status,
'lifecycle_state': i.lifecycle_state, 'lifecycle_state': i.lifecycle_state,

@ -40,12 +40,6 @@ options:
required: false required: false
choices: ['present', 'absent'] choices: ['present', 'absent']
default: present default: present
region:
description:
- the EC2 region to use
required: false
default: null
aliases: [ ec2_region ]
in_vpc: in_vpc:
description: description:
- allocate an EIP inside a VPC or not - allocate an EIP inside a VPC or not
@ -64,7 +58,9 @@ options:
required: false required: false
default: false default: false
version_added: "2.0" version_added: "2.0"
extends_documentation_fragment: aws extends_documentation_fragment:
- aws
- ec2
author: "Lorin Hochstein (@lorin) <lorin@nimbisservices.com>" author: "Lorin Hochstein (@lorin) <lorin@nimbisservices.com>"
author: "Rick Mendes (@rickmendes) <rmendes@illumina.com>" author: "Rick Mendes (@rickmendes) <rmendes@illumina.com>"
notes: notes:

@ -41,11 +41,6 @@ options:
- List of ELB names, required for registration. The ec2_elbs fact should be used if there was a previous de-register. - List of ELB names, required for registration. The ec2_elbs fact should be used if there was a previous de-register.
required: false required: false
default: None default: None
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: false
aliases: ['aws_region', 'ec2_region']
enable_availability_zone: enable_availability_zone:
description: description:
- Whether to enable the availability zone of the instance on the target ELB if the availability zone has not already - Whether to enable the availability zone of the instance on the target ELB if the availability zone has not already
@ -73,7 +68,9 @@ options:
required: false required: false
default: 0 default: 0
version_added: "1.6" version_added: "1.6"
extends_documentation_fragment: aws extends_documentation_fragment:
- aws
- ec2
""" """
EXAMPLES = """ EXAMPLES = """
@ -85,7 +82,7 @@ pre_tasks:
local_action: local_action:
module: ec2_elb module: ec2_elb
instance_id: "{{ ansible_ec2_instance_id }}" instance_id: "{{ ansible_ec2_instance_id }}"
state: 'absent' state: absent
roles: roles:
- myrole - myrole
post_tasks: post_tasks:
@ -94,7 +91,7 @@ post_tasks:
module: ec2_elb module: ec2_elb
instance_id: "{{ ansible_ec2_instance_id }}" instance_id: "{{ ansible_ec2_instance_id }}"
ec2_elbs: "{{ item }}" ec2_elbs: "{{ item }}"
state: 'present' state: present
with_items: ec2_elbs with_items: ec2_elbs
""" """

@ -29,6 +29,7 @@ options:
state: state:
description: description:
- Create or destroy the ELB - Create or destroy the ELB
choices: ["present", "absent"]
required: true required: true
name: name:
description: description:
@ -74,11 +75,6 @@ options:
- An associative array of access logs configuration settings (see example) - An associative array of access logs configuration settings (see example)
require: false require: false
default: None default: None
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: false
aliases: ['aws_region', 'ec2_region']
subnets: subnets:
description: description:
- A list of VPC subnets to use when creating ELB. Zones should be empty if using this. - A list of VPC subnets to use when creating ELB. Zones should be empty if using this.
@ -126,7 +122,9 @@ options:
required: false required: false
version_added: "2.0" version_added: "2.0"
extends_documentation_fragment: aws extends_documentation_fragment:
- aws
- ec2
""" """
EXAMPLES = """ EXAMPLES = """

@ -45,12 +45,6 @@ options:
- List of firewall outbound rules to enforce in this group (see example). If none are supplied, a default all-out rule is assumed. If an empty list is supplied, no outbound rules will be enabled. - List of firewall outbound rules to enforce in this group (see example). If none are supplied, a default all-out rule is assumed. If an empty list is supplied, no outbound rules will be enabled.
required: false required: false
version_added: "1.6" version_added: "1.6"
region:
description:
- the EC2 region to use
required: false
default: null
aliases: []
state: state:
version_added: "1.4" version_added: "1.4"
description: description:
@ -74,7 +68,9 @@ options:
default: 'true' default: 'true'
aliases: [] aliases: []
extends_documentation_fragment: aws extends_documentation_fragment:
- aws
- ec2
notes: notes:
- If a rule declares a group_name and that group doesn't exist, it will be - If a rule declares a group_name and that group doesn't exist, it will be
@ -116,6 +112,10 @@ EXAMPLES = '''
from_port: 10051 from_port: 10051
to_port: 10051 to_port: 10051
group_id: sg-12345678 group_id: sg-12345678
- proto: icmp
from_port: 8 # icmp type, -1 = any type
to_port: -1 # icmp subtype, -1 = any subtype
cidr_ip: 10.0.0.0/8
- proto: all - proto: all
# the containing group name may be specified here # the containing group name may be specified here
group_name: example group_name: example

@ -31,12 +31,6 @@ options:
description: description:
- Public key material. - Public key material.
required: false required: false
region:
description:
- the EC2 region to use
required: false
default: null
aliases: []
state: state:
description: description:
- create or delete keypair - create or delete keypair
@ -58,7 +52,9 @@ options:
aliases: [] aliases: []
version_added: "1.6" version_added: "1.6"
extends_documentation_fragment: aws extends_documentation_fragment:
- aws
- ec2
author: "Vincent Viallet (@zbal)" author: "Vincent Viallet (@zbal)"
''' '''

@ -55,11 +55,6 @@ options:
description: description:
- A list of security groups into which instances should be found - A list of security groups into which instances should be found
required: false required: false
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: false
aliases: ['aws_region', 'ec2_region']
volumes: volumes:
description: description:
- a list of volume dicts, each containing device name and optionally ephemeral id or snapshot id. Size and type (and number of iops for io device type) must be specified for a new volume or a root volume, and may be passed for a snapshot volume. For any volume, a volume size less than 1 will be interpreted as a request not to create the volume. - a list of volume dicts, each containing device name and optionally ephemeral id or snapshot id. Size and type (and number of iops for io device type) must be specified for a new volume or a root volume, and may be passed for a snapshot volume. For any volume, a volume size less than 1 will be interpreted as a request not to create the volume.
@ -128,7 +123,9 @@ options:
required: false required: false
default: null default: null
version_added: "2.0" version_added: "2.0"
extends_documentation_fragment: aws extends_documentation_fragment:
- aws
- ec2
""" """
EXAMPLES = ''' EXAMPLES = '''

@ -89,7 +89,9 @@ options:
description: description:
- A list of the names of action(s) to take when the alarm is in the 'ok' status - A list of the names of action(s) to take when the alarm is in the 'ok' status
required: false required: false
extends_documentation_fragment: aws extends_documentation_fragment:
- aws
- ec2
""" """
EXAMPLES = ''' EXAMPLES = '''
@ -115,9 +117,6 @@ EXAMPLES = '''
import sys import sys
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
try: try:
import boto.ec2.cloudwatch import boto.ec2.cloudwatch
from boto.ec2.cloudwatch import CloudWatchConnection, MetricAlarm from boto.ec2.cloudwatch import CloudWatchConnection, MetricAlarm
@ -184,7 +183,7 @@ def create_metric_alarm(connection, module):
comparisons = {'<=' : 'LessThanOrEqualToThreshold', '<' : 'LessThanThreshold', '>=' : 'GreaterThanOrEqualToThreshold', '>' : 'GreaterThanThreshold'} comparisons = {'<=' : 'LessThanOrEqualToThreshold', '<' : 'LessThanThreshold', '>=' : 'GreaterThanOrEqualToThreshold', '>' : 'GreaterThanThreshold'}
alarm.comparison = comparisons[comparison] alarm.comparison = comparisons[comparison]
dim1 = module.params.get('dimensions', {}) dim1 = module.params.get('dimensions')
dim2 = alarm.dimensions dim2 = alarm.dimensions
for keys in dim1: for keys in dim1:
@ -255,12 +254,11 @@ def main():
unit=dict(type='str', choices=['Seconds', 'Microseconds', 'Milliseconds', 'Bytes', 'Kilobytes', 'Megabytes', 'Gigabytes', 'Terabytes', 'Bits', 'Kilobits', 'Megabits', 'Gigabits', 'Terabits', 'Percent', 'Count', 'Bytes/Second', 'Kilobytes/Second', 'Megabytes/Second', 'Gigabytes/Second', 'Terabytes/Second', 'Bits/Second', 'Kilobits/Second', 'Megabits/Second', 'Gigabits/Second', 'Terabits/Second', 'Count/Second', 'None']), unit=dict(type='str', choices=['Seconds', 'Microseconds', 'Milliseconds', 'Bytes', 'Kilobytes', 'Megabytes', 'Gigabytes', 'Terabytes', 'Bits', 'Kilobits', 'Megabits', 'Gigabits', 'Terabits', 'Percent', 'Count', 'Bytes/Second', 'Kilobytes/Second', 'Megabytes/Second', 'Gigabytes/Second', 'Terabytes/Second', 'Bits/Second', 'Kilobits/Second', 'Megabits/Second', 'Gigabits/Second', 'Terabits/Second', 'Count/Second', 'None']),
evaluation_periods=dict(type='int'), evaluation_periods=dict(type='int'),
description=dict(type='str'), description=dict(type='str'),
dimensions=dict(type='dict'), dimensions=dict(type='dict', default={}),
alarm_actions=dict(type='list'), alarm_actions=dict(type='list'),
insufficient_data_actions=dict(type='list'), insufficient_data_actions=dict(type='list'),
ok_actions=dict(type='list'), ok_actions=dict(type='list'),
state=dict(default='present', choices=['present', 'absent']), state=dict(default='present', choices=['present', 'absent']),
region=dict(aliases=['aws_region', 'ec2_region']),
) )
) )
@ -272,14 +270,22 @@ def main():
state = module.params.get('state') state = module.params.get('state')
region, ec2_url, aws_connect_params = get_aws_connection_info(module) region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if region:
try: try:
connection = connect_to_aws(boto.ec2.cloudwatch, region, **aws_connect_params) connection = connect_to_aws(boto.ec2.cloudwatch, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, StandardError), e: except (boto.exception.NoAuthHandlerFound, StandardError), e:
module.fail_json(msg=str(e)) module.fail_json(msg=str(e))
else:
module.fail_json(msg="region must be specified")
if state == 'present': if state == 'present':
create_metric_alarm(connection, module) create_metric_alarm(connection, module)
elif state == 'absent': elif state == 'absent':
delete_metric_alarm(connection, module) delete_metric_alarm(connection, module)
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main() main()

@ -53,7 +53,9 @@ options:
description: description:
- The minimum period of time between which autoscaling actions can take place - The minimum period of time between which autoscaling actions can take place
required: false required: false
extends_documentation_fragment: aws extends_documentation_fragment:
- aws
- ec2
""" """
EXAMPLES = ''' EXAMPLES = '''

@ -22,11 +22,6 @@ description:
- creates an EC2 snapshot from an existing EBS volume - creates an EC2 snapshot from an existing EBS volume
version_added: "1.5" version_added: "1.5"
options: options:
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: false
aliases: ['aws_region', 'ec2_region']
volume_id: volume_id:
description: description:
- volume from which to take the snapshot - volume from which to take the snapshot
@ -82,7 +77,9 @@ options:
version_added: "1.9" version_added: "1.9"
author: "Will Thames (@willthames)" author: "Will Thames (@willthames)"
extends_documentation_fragment: aws extends_documentation_fragment:
- aws
- ec2
''' '''
EXAMPLES = ''' EXAMPLES = '''

@ -22,12 +22,6 @@ description:
- Creates, removes and lists tags from any EC2 resource. The resource is referenced by its resource id (e.g. an instance being i-XXXXXXX). It is designed to be used with complex args (tags), see the examples. This module has a dependency on python-boto. - Creates, removes and lists tags from any EC2 resource. The resource is referenced by its resource id (e.g. an instance being i-XXXXXXX). It is designed to be used with complex args (tags), see the examples. This module has a dependency on python-boto.
version_added: "1.3" version_added: "1.3"
options: options:
region:
description:
- region in which the resource exists.
required: false
default: null
aliases: ['aws_region', 'ec2_region']
resource: resource:
description: description:
- The EC2 resource id. - The EC2 resource id.
@ -49,7 +43,9 @@ options:
aliases: [] aliases: []
author: "Lester Wade (@lwade)" author: "Lester Wade (@lwade)"
extends_documentation_fragment: aws extends_documentation_fragment:
- aws
- ec2
''' '''
EXAMPLES = ''' EXAMPLES = '''
@ -73,7 +69,7 @@ tasks:
Env: production Env: production
exact_count: 1 exact_count: 1
group: "{{ security_group }}" group: "{{ security_group }}"
keypair: ""{{ keypair }}" keypair: "{{ keypair }}"
image: "{{ image_id }}" image: "{{ image_id }}"
instance_tags: instance_tags:
Name: dbserver Name: dbserver

@ -74,12 +74,6 @@ options:
required: false required: false
default: null default: null
aliases: [] aliases: []
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: false
default: null
aliases: ['aws_region', 'ec2_region']
zone: zone:
description: description:
- zone in which to create the volume, if unset uses the zone the instance is in (if set) - zone in which to create the volume, if unset uses the zone the instance is in (if set)
@ -108,7 +102,9 @@ options:
choices: ['absent', 'present', 'list'] choices: ['absent', 'present', 'list']
version_added: "1.6" version_added: "1.6"
author: "Lester Wade (@lwade)" author: "Lester Wade (@lwade)"
extends_documentation_fragment: aws extends_documentation_fragment:
- aws
- ec2
''' '''
EXAMPLES = ''' EXAMPLES = '''
@ -409,7 +405,8 @@ def main():
'attachment_set': { 'attachment_set': {
'attach_time': attachment.attach_time, 'attach_time': attachment.attach_time,
'device': attachment.device, 'device': attachment.device,
'status': attachment.status 'status': attachment.status,
'deleteOnTermination': attachment.deleteOnTermination
} }
}) })

@ -94,14 +94,10 @@ options:
required: true required: true
default: present default: present
aliases: [] aliases: []
region:
description:
- The AWS region to use. If not specified then the value of the AWS_REGION or EC2_REGION environment variable, if any, is used.
required: true
default: null
aliases: ['aws_region', 'ec2_region']
author: "Carson Gee (@carsongee)" author: "Carson Gee (@carsongee)"
extends_documentation_fragment: aws extends_documentation_fragment:
- aws
- ec2
''' '''
EXAMPLES = ''' EXAMPLES = '''

@ -72,7 +72,9 @@ options:
default: false default: false
required: false required: false
extends_documentation_fragment: aws extends_documentation_fragment:
- aws
- ec2
''' '''
EXAMPLES = ''' EXAMPLES = '''

@ -57,6 +57,12 @@ options:
- The port number on which each of the cache nodes will accept connections - The port number on which each of the cache nodes will accept connections
required: false required: false
default: none default: none
parameter_group:
description:
- Specify non-default parameter group names to be associated with cache cluster
required: false
default: None
version_added: "2.0"
cache_subnet_group: cache_subnet_group:
description: description:
- The subnet group name to associate with. Only use if inside a vpc. Required if inside a vpc - The subnet group name to associate with. Only use if inside a vpc. Required if inside a vpc
@ -91,13 +97,9 @@ options:
required: false required: false
default: no default: no
choices: [ "yes", "no" ] choices: [ "yes", "no" ]
region: extends_documentation_fragment:
description: - aws
- The AWS region to use. If not specified then the value of the AWS_REGION or EC2_REGION environment variable, if any, is used. - ec2
required: true
default: null
aliases: ['aws_region', 'ec2_region']
extends_documentation_fragment: aws
""" """
EXAMPLES = """ EXAMPLES = """
@ -148,7 +150,7 @@ class ElastiCacheManager(object):
EXIST_STATUSES = ['available', 'creating', 'rebooting', 'modifying'] EXIST_STATUSES = ['available', 'creating', 'rebooting', 'modifying']
def __init__(self, module, name, engine, cache_engine_version, node_type, def __init__(self, module, name, engine, cache_engine_version, node_type,
num_nodes, cache_port, cache_subnet_group, num_nodes, cache_port, parameter_group, cache_subnet_group,
cache_security_groups, security_group_ids, zone, wait, cache_security_groups, security_group_ids, zone, wait,
hard_modify, region, **aws_connect_kwargs): hard_modify, region, **aws_connect_kwargs):
self.module = module self.module = module
@ -158,6 +160,7 @@ class ElastiCacheManager(object):
self.node_type = node_type self.node_type = node_type
self.num_nodes = num_nodes self.num_nodes = num_nodes
self.cache_port = cache_port self.cache_port = cache_port
self.parameter_group = parameter_group
self.cache_subnet_group = cache_subnet_group self.cache_subnet_group = cache_subnet_group
self.cache_security_groups = cache_security_groups self.cache_security_groups = cache_security_groups
self.security_group_ids = security_group_ids self.security_group_ids = security_group_ids
@ -216,6 +219,7 @@ class ElastiCacheManager(object):
engine_version=self.cache_engine_version, engine_version=self.cache_engine_version,
cache_security_group_names=self.cache_security_groups, cache_security_group_names=self.cache_security_groups,
security_group_ids=self.security_group_ids, security_group_ids=self.security_group_ids,
cache_parameter_group_name=self.parameter_group,
cache_subnet_group_name=self.cache_subnet_group, cache_subnet_group_name=self.cache_subnet_group,
preferred_availability_zone=self.zone, preferred_availability_zone=self.zone,
port=self.cache_port) port=self.cache_port)
@ -291,6 +295,7 @@ class ElastiCacheManager(object):
num_cache_nodes=self.num_nodes, num_cache_nodes=self.num_nodes,
cache_node_ids_to_remove=nodes_to_remove, cache_node_ids_to_remove=nodes_to_remove,
cache_security_group_names=self.cache_security_groups, cache_security_group_names=self.cache_security_groups,
cache_parameter_group_name=self.parameter_group,
security_group_ids=self.security_group_ids, security_group_ids=self.security_group_ids,
apply_immediately=True, apply_immediately=True,
engine_version=self.cache_engine_version) engine_version=self.cache_engine_version)
@ -437,6 +442,7 @@ class ElastiCacheManager(object):
def _refresh_data(self, cache_cluster_data=None): def _refresh_data(self, cache_cluster_data=None):
"""Refresh data about this cache cluster""" """Refresh data about this cache cluster"""
if cache_cluster_data is None: if cache_cluster_data is None:
try: try:
response = self.conn.describe_cache_clusters(cache_cluster_id=self.name, response = self.conn.describe_cache_clusters(cache_cluster_id=self.name,
@ -480,6 +486,7 @@ def main():
cache_engine_version={'required': False}, cache_engine_version={'required': False},
node_type={'required': False, 'default': 'cache.m1.small'}, node_type={'required': False, 'default': 'cache.m1.small'},
num_nodes={'required': False, 'default': None, 'type': 'int'}, num_nodes={'required': False, 'default': None, 'type': 'int'},
parameter_group={'required': False, 'default': None},
cache_port={'required': False, 'type': 'int'}, cache_port={'required': False, 'type': 'int'},
cache_subnet_group={'required': False, 'default': None}, cache_subnet_group={'required': False, 'default': None},
cache_security_groups={'required': False, 'default': [default], cache_security_groups={'required': False, 'default': [default],
@ -514,6 +521,7 @@ def main():
zone = module.params['zone'] zone = module.params['zone']
wait = module.params['wait'] wait = module.params['wait']
hard_modify = module.params['hard_modify'] hard_modify = module.params['hard_modify']
parameter_group = module.params['parameter_group']
if cache_subnet_group and cache_security_groups == [default]: if cache_subnet_group and cache_security_groups == [default]:
cache_security_groups = [] cache_security_groups = []
@ -532,6 +540,7 @@ def main():
elasticache_manager = ElastiCacheManager(module, name, engine, elasticache_manager = ElastiCacheManager(module, name, engine,
cache_engine_version, node_type, cache_engine_version, node_type,
num_nodes, cache_port, num_nodes, cache_port,
parameter_group,
cache_subnet_group, cache_subnet_group,
cache_security_groups, cache_security_groups,
security_group_ids, zone, wait, security_group_ids, zone, wait,

@ -42,13 +42,10 @@ options:
- List of subnet IDs that make up the Elasticache subnet group. - List of subnet IDs that make up the Elasticache subnet group.
required: false required: false
default: null default: null
region:
description:
- The AWS region to use. If not specified then the value of the AWS_REGION or EC2_REGION environment variable, if any, is used.
required: true
aliases: ['aws_region', 'ec2_region']
author: "Tim Mahoney (@timmahoney)" author: "Tim Mahoney (@timmahoney)"
extends_documentation_fragment: aws extends_documentation_fragment:
- aws
- ec2
''' '''
EXAMPLES = ''' EXAMPLES = '''

@ -100,7 +100,9 @@ notes:
author: author:
- "Jonathan I. Davila (@defionscode)" - "Jonathan I. Davila (@defionscode)"
- "Paul Seiffert (@seiffert)" - "Paul Seiffert (@seiffert)"
extends_documentation_fragment: aws extends_documentation_fragment:
- aws
- ec2
''' '''
EXAMPLES = ''' EXAMPLES = '''

@ -85,7 +85,9 @@ options:
requirements: [ "boto" ] requirements: [ "boto" ]
author: Jonathan I. Davila author: Jonathan I. Davila
extends_documentation_fragment: aws extends_documentation_fragment:
- aws
- ec2
''' '''
EXAMPLES = ''' EXAMPLES = '''
@ -241,13 +243,10 @@ def main():
if not HAS_BOTO: if not HAS_BOTO:
module.fail_json(msg="Boto is required for this module") module.fail_json(msg="Boto is required for this module")
ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module) region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
try: try:
iam = boto.iam.connection.IAMConnection( iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs)
aws_access_key_id=aws_access_key,
aws_secret_access_key=aws_secret_key,
)
except boto.exception.NoAuthHandlerFound, e: except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg=str(e)) module.fail_json(msg=str(e))

@ -27,58 +27,40 @@ options:
required: true required: true
default: null default: null
choices: [ "user", "group", "role"] choices: [ "user", "group", "role"]
aliases: []
iam_name: iam_name:
description: description:
- Name of IAM resource you wish to target for policy actions. In other words, the user name, group name or role name. - Name of IAM resource you wish to target for policy actions. In other words, the user name, group name or role name.
required: true required: true
aliases: []
policy_name: policy_name:
description: description:
- The name label for the policy to create or remove. - The name label for the policy to create or remove.
required: false required: true
aliases: []
policy_document: policy_document:
description: description:
- The path to the properly json formatted policy file (mutually exclusive with C(policy_json)) - The path to the properly json formatted policy file (mutually exclusive with C(policy_json))
required: false required: false
aliases: []
policy_json: policy_json:
description: description:
- A properly json formatted policy as string (mutually exclusive with C(policy_document), see https://github.com/ansible/ansible/issues/7005#issuecomment-42894813 on how to use it properly) - A properly json formatted policy as string (mutually exclusive with C(policy_document), see https://github.com/ansible/ansible/issues/7005#issuecomment-42894813 on how to use it properly)
required: false required: false
aliases: []
state: state:
description: description:
- Whether to create or delete the IAM policy. - Whether to create or delete the IAM policy.
required: true required: true
default: null default: null
choices: [ "present", "absent"] choices: [ "present", "absent"]
aliases: []
skip_duplicates: skip_duplicates:
description: description:
- By default the module looks for any policies that match the document you pass in, if there is a match it will not make a new policy object with the same rules. You can override this by specifying false which would allow for two policy objects with different names but same rules. - By default the module looks for any policies that match the document you pass in, if there is a match it will not make a new policy object with the same rules. You can override this by specifying false which would allow for two policy objects with different names but same rules.
required: false required: false
default: "/" default: "/"
aliases: []
aws_secret_key:
description:
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_secret_key', 'secret_key' ]
aws_access_key:
description:
- AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_access_key', 'access_key' ]
requirements: [ "boto" ]
notes: notes:
- 'Currently boto does not support the removal of Managed Policies, the module will not work removing/adding managed policies.' - 'Currently boto does not support the removal of Managed Policies, the module will not work removing/adding managed policies.'
author: "Jonathan I. Davila (@defionscode)" author: "Jonathan I. Davila (@defionscode)"
extends_documentation_fragment: aws extends_documentation_fragment:
- aws
- ec2
''' '''
EXAMPLES = ''' EXAMPLES = '''
@ -289,7 +271,7 @@ def main():
iam_name=dict(default=None, required=False), iam_name=dict(default=None, required=False),
policy_name=dict(default=None, required=True), policy_name=dict(default=None, required=True),
policy_document=dict(default=None, required=False), policy_document=dict(default=None, required=False),
policy_json=dict(type='str', default=None, required=False), policy_json=dict(default=None, required=False),
skip_duplicates=dict(type='bool', default=True, required=False) skip_duplicates=dict(type='bool', default=True, required=False)
)) ))

@ -917,6 +917,7 @@ def validate_parameters(required_vars, valid_vars, module):
'subnet': 'db_subnet_group_name', 'subnet': 'db_subnet_group_name',
'license_model': 'license_model', 'license_model': 'license_model',
'option_group': 'option_group_name', 'option_group': 'option_group_name',
'size': 'allocated_storage',
'iops': 'iops', 'iops': 'iops',
'new_instance_name': 'new_instance_id', 'new_instance_name': 'new_instance_id',
'apply_immediately': 'apply_immediately', 'apply_immediately': 'apply_immediately',

@ -61,14 +61,10 @@ options:
default: null default: null
aliases: [] aliases: []
choices: [ 'mysql5.1', 'mysql5.5', 'mysql5.6', 'oracle-ee-11.2', 'oracle-se-11.2', 'oracle-se1-11.2', 'postgres9.3', 'postgres9.4', 'sqlserver-ee-10.5', 'sqlserver-ee-11.0', 'sqlserver-ex-10.5', 'sqlserver-ex-11.0', 'sqlserver-se-10.5', 'sqlserver-se-11.0', 'sqlserver-web-10.5', 'sqlserver-web-11.0'] choices: [ 'mysql5.1', 'mysql5.5', 'mysql5.6', 'oracle-ee-11.2', 'oracle-se-11.2', 'oracle-se1-11.2', 'postgres9.3', 'postgres9.4', 'sqlserver-ee-10.5', 'sqlserver-ee-11.0', 'sqlserver-ex-10.5', 'sqlserver-ex-11.0', 'sqlserver-se-10.5', 'sqlserver-se-11.0', 'sqlserver-web-10.5', 'sqlserver-web-11.0']
region:
description:
- The AWS region to use. If not specified then the value of the AWS_REGION or EC2_REGION environment variable, if any, is used.
required: true
default: null
aliases: ['aws_region', 'ec2_region']
author: "Scott Anderson (@tastychutney)" author: "Scott Anderson (@tastychutney)"
extends_documentation_fragment: aws extends_documentation_fragment:
- aws
- ec2
''' '''
EXAMPLES = ''' EXAMPLES = '''

@ -47,14 +47,10 @@ options:
required: false required: false
default: null default: null
aliases: [] aliases: []
region:
description:
- The AWS region to use. If not specified then the value of the AWS_REGION or EC2_REGION environment variable, if any, is used.
required: true
default: null
aliases: ['aws_region', 'ec2_region']
author: "Scott Anderson (@tastychutney)" author: "Scott Anderson (@tastychutney)"
extends_documentation_fragment: aws extends_documentation_fragment:
- aws
- ec2
''' '''
EXAMPLES = ''' EXAMPLES = '''

@ -486,7 +486,7 @@ def main():
# First, we check to see if the bucket exists, we get "bucket" returned. # First, we check to see if the bucket exists, we get "bucket" returned.
bucketrtn = bucket_check(module, s3, bucket) bucketrtn = bucket_check(module, s3, bucket)
if bucketrtn is False: if bucketrtn is False:
module.fail_json(msg="Target bucket cannot be found", failed=True) module.fail_json(msg="Source bucket cannot be found", failed=True)
# Next, we check to see if the key in the bucket exists. If it exists, it also returns key_matches md5sum check. # Next, we check to see if the key in the bucket exists. If it exists, it also returns key_matches md5sum check.
keyrtn = key_check(module, s3, bucket, obj, version=version) keyrtn = key_check(module, s3, bucket, obj, version=version)

@ -249,22 +249,29 @@ AZURE_ROLE_SIZES = ['ExtraSmall',
'Standard_G4', 'Standard_G4',
'Standard_G5'] 'Standard_G5']
from distutils.version import LooseVersion
try: try:
import azure as windows_azure import azure as windows_azure
from azure import WindowsAzureError, WindowsAzureMissingResourceError if hasattr(windows_azure, '__version__') and LooseVersion(windows_azure.__version__) <= "0.11.1":
from azure import WindowsAzureError as AzureException
from azure import WindowsAzureMissingResourceError as AzureMissingException
else:
from azure.common import AzureException as AzureException
from azure.common import AzureMissingResourceHttpError as AzureMissingException
from azure.servicemanagement import (ServiceManagementService, OSVirtualHardDisk, SSH, PublicKeys, from azure.servicemanagement import (ServiceManagementService, OSVirtualHardDisk, SSH, PublicKeys,
PublicKey, LinuxConfigurationSet, ConfigurationSetInputEndpoints, PublicKey, LinuxConfigurationSet, ConfigurationSetInputEndpoints,
ConfigurationSetInputEndpoint, Listener, WindowsConfigurationSet) ConfigurationSetInputEndpoint, Listener, WindowsConfigurationSet)
HAS_AZURE = True HAS_AZURE = True
except ImportError: except ImportError:
HAS_AZURE = False HAS_AZURE = False
from distutils.version import LooseVersion
from types import MethodType from types import MethodType
import json import json
def _wait_for_completion(azure, promise, wait_timeout, msg): def _wait_for_completion(azure, promise, wait_timeout, msg):
if not promise: return if not promise: return
wait_timeout = time.time() + wait_timeout wait_timeout = time.time() + wait_timeout
@ -274,7 +281,7 @@ def _wait_for_completion(azure, promise, wait_timeout, msg):
if operation_result.status == "Succeeded": if operation_result.status == "Succeeded":
return return
raise WindowsAzureError('Timed out waiting for async operation ' + msg + ' "' + str(promise.request_id) + '" to complete.') raise AzureException('Timed out waiting for async operation ' + msg + ' "' + str(promise.request_id) + '" to complete.')
def _delete_disks_when_detached(azure, wait_timeout, disk_names): def _delete_disks_when_detached(azure, wait_timeout, disk_names):
def _handle_timeout(signum, frame): def _handle_timeout(signum, frame):
@ -289,7 +296,7 @@ def _delete_disks_when_detached(azure, wait_timeout, disk_names):
if disk.attached_to is None: if disk.attached_to is None:
azure.delete_disk(disk.name, True) azure.delete_disk(disk.name, True)
disk_names.remove(disk_name) disk_names.remove(disk_name)
except WindowsAzureError, e: except AzureException, e:
module.fail_json(msg="failed to get or delete disk, error was: %s" % (disk_name, str(e))) module.fail_json(msg="failed to get or delete disk, error was: %s" % (disk_name, str(e)))
finally: finally:
signal.alarm(0) signal.alarm(0)
@ -347,13 +354,13 @@ def create_virtual_machine(module, azure):
result = azure.create_hosted_service(service_name=name, label=name, location=location) result = azure.create_hosted_service(service_name=name, label=name, location=location)
_wait_for_completion(azure, result, wait_timeout, "create_hosted_service") _wait_for_completion(azure, result, wait_timeout, "create_hosted_service")
changed = True changed = True
except WindowsAzureError, e: except AzureException, e:
module.fail_json(msg="failed to create the new service, error was: %s" % str(e)) module.fail_json(msg="failed to create the new service, error was: %s" % str(e))
try: try:
# check to see if a vm with this name exists; if so, do nothing # check to see if a vm with this name exists; if so, do nothing
azure.get_role(name, name, name) azure.get_role(name, name, name)
except WindowsAzureMissingResourceError: except AzureMissingException:
# vm does not exist; create it # vm does not exist; create it
if os_type == 'linux': if os_type == 'linux':
@ -419,13 +426,13 @@ def create_virtual_machine(module, azure):
virtual_network_name=virtual_network_name) virtual_network_name=virtual_network_name)
_wait_for_completion(azure, result, wait_timeout, "create_virtual_machine_deployment") _wait_for_completion(azure, result, wait_timeout, "create_virtual_machine_deployment")
changed = True changed = True
except WindowsAzureError, e: except AzureException, e:
module.fail_json(msg="failed to create the new virtual machine, error was: %s" % str(e)) module.fail_json(msg="failed to create the new virtual machine, error was: %s" % str(e))
try: try:
deployment = azure.get_deployment_by_name(service_name=name, deployment_name=name) deployment = azure.get_deployment_by_name(service_name=name, deployment_name=name)
return (changed, urlparse(deployment.url).hostname, deployment) return (changed, urlparse(deployment.url).hostname, deployment)
except WindowsAzureError, e: except AzureException, e:
module.fail_json(msg="failed to lookup the deployment information for %s, error was: %s" % (name, str(e))) module.fail_json(msg="failed to lookup the deployment information for %s, error was: %s" % (name, str(e)))
@ -453,9 +460,9 @@ def terminate_virtual_machine(module, azure):
disk_names = [] disk_names = []
try: try:
deployment = azure.get_deployment_by_name(service_name=name, deployment_name=name) deployment = azure.get_deployment_by_name(service_name=name, deployment_name=name)
except WindowsAzureMissingResourceError, e: except AzureMissingException, e:
pass # no such deployment or service pass # no such deployment or service
except WindowsAzureError, e: except AzureException, e:
module.fail_json(msg="failed to find the deployment, error was: %s" % str(e)) module.fail_json(msg="failed to find the deployment, error was: %s" % str(e))
# Delete deployment # Delete deployment
@ -468,13 +475,13 @@ def terminate_virtual_machine(module, azure):
role_props = azure.get_role(name, deployment.name, role.role_name) role_props = azure.get_role(name, deployment.name, role.role_name)
if role_props.os_virtual_hard_disk.disk_name not in disk_names: if role_props.os_virtual_hard_disk.disk_name not in disk_names:
disk_names.append(role_props.os_virtual_hard_disk.disk_name) disk_names.append(role_props.os_virtual_hard_disk.disk_name)
except WindowsAzureError, e: except AzureException, e:
module.fail_json(msg="failed to get the role %s, error was: %s" % (role.role_name, str(e))) module.fail_json(msg="failed to get the role %s, error was: %s" % (role.role_name, str(e)))
try: try:
result = azure.delete_deployment(name, deployment.name) result = azure.delete_deployment(name, deployment.name)
_wait_for_completion(azure, result, wait_timeout, "delete_deployment") _wait_for_completion(azure, result, wait_timeout, "delete_deployment")
except WindowsAzureError, e: except AzureException, e:
module.fail_json(msg="failed to delete the deployment %s, error was: %s" % (deployment.name, str(e))) module.fail_json(msg="failed to delete the deployment %s, error was: %s" % (deployment.name, str(e)))
# It's unclear when disks associated with terminated deployment get detatched. # It's unclear when disks associated with terminated deployment get detatched.
@ -482,14 +489,14 @@ def terminate_virtual_machine(module, azure):
# become detatched by polling the list of remaining disks and examining the state. # become detatched by polling the list of remaining disks and examining the state.
try: try:
_delete_disks_when_detached(azure, wait_timeout, disk_names) _delete_disks_when_detached(azure, wait_timeout, disk_names)
except (WindowsAzureError, TimeoutError), e: except (AzureException, TimeoutError), e:
module.fail_json(msg=str(e)) module.fail_json(msg=str(e))
try: try:
# Now that the vm is deleted, remove the cloud service # Now that the vm is deleted, remove the cloud service
result = azure.delete_hosted_service(service_name=name) result = azure.delete_hosted_service(service_name=name)
_wait_for_completion(azure, result, wait_timeout, "delete_hosted_service") _wait_for_completion(azure, result, wait_timeout, "delete_hosted_service")
except WindowsAzureError, e: except AzureException, e:
module.fail_json(msg="failed to delete the service %s, error was: %s" % (name, str(e))) module.fail_json(msg="failed to delete the service %s, error was: %s" % (name, str(e)))
public_dns_name = urlparse(deployment.url).hostname public_dns_name = urlparse(deployment.url).hostname
@ -545,7 +552,8 @@ def main():
subscription_id, management_cert_path = get_azure_creds(module) subscription_id, management_cert_path = get_azure_creds(module)
wait_timeout_redirects = int(module.params.get('wait_timeout_redirects')) wait_timeout_redirects = int(module.params.get('wait_timeout_redirects'))
if LooseVersion(windows_azure.__version__) <= "0.8.0":
if hasattr(windows_azure, '__version__') and LooseVersion(windows_azure.__version__) <= "0.8.0":
# wrapper for handling redirects which the sdk <= 0.8.0 is not following # wrapper for handling redirects which the sdk <= 0.8.0 is not following
azure = Wrapper(ServiceManagementService(subscription_id, management_cert_path), wait_timeout_redirects) azure = Wrapper(ServiceManagementService(subscription_id, management_cert_path), wait_timeout_redirects)
else: else:
@ -597,7 +605,7 @@ class Wrapper(object):
while wait_timeout > time.time(): while wait_timeout > time.time():
try: try:
return f() return f()
except WindowsAzureError, e: except AzureException, e:
if not str(e).lower().find("temporary redirect") == -1: if not str(e).lower().find("temporary redirect") == -1:
time.sleep(5) time.sleep(5)
pass pass

@ -195,7 +195,7 @@ def core(module):
records = domain.records() records = domain.records()
at_record = None at_record = None
for record in records: for record in records:
if record.name == "@": if record.name == "@" and record.record_type == 'A':
at_record = record at_record = record
if not at_record.data == getkeyordie("ip"): if not at_record.data == getkeyordie("ip"):

@ -97,9 +97,12 @@ options:
- You can specify a different logging driver for the container than for the daemon. - You can specify a different logging driver for the container than for the daemon.
"json-file" Default logging driver for Docker. Writes JSON messages to file. "json-file" Default logging driver for Docker. Writes JSON messages to file.
docker logs command is available only for this logging driver. docker logs command is available only for this logging driver.
"none" disables any logging for the container. docker logs won't be available with this driver. "none" disables any logging for the container.
"syslog" Syslog logging driver for Docker. Writes log messages to syslog. "syslog" Syslog logging driver for Docker. Writes log messages to syslog.
docker logs command is not available for this logging driver. docker logs command is not available for this logging driver.
"journald" Journald logging driver for Docker. Writes log messages to "journald".
"gelf" Graylog Extended Log Format (GELF) logging driver for Docker. Writes log messages to a GELF endpoint likeGraylog or Logstash.
"fluentd" Fluentd logging driver for Docker. Writes log messages to "fluentd" (forward input).
If not defined explicitly, the Docker daemon's default ("json-file") will apply. If not defined explicitly, the Docker daemon's default ("json-file") will apply.
Requires docker >= 1.6.0. Requires docker >= 1.6.0.
required: false required: false
@ -108,11 +111,14 @@ options:
- json-file - json-file
- none - none
- syslog - syslog
- journald
- gelf
- fluentd
version_added: "2.0" version_added: "2.0"
log_opt: log_opt:
description: description:
- Additional options to pass to the logging driver selected above. See Docker log-driver - Additional options to pass to the logging driver selected above. See Docker `log-driver
documentation for more information (https://docs.docker.com/reference/logging/overview/). <https://docs.docker.com/reference/logging/overview/>` documentation for more information.
Requires docker >=1.7.0. Requires docker >=1.7.0.
required: false required: false
default: null default: null
@ -1056,11 +1062,11 @@ class DockerManager(object):
continue continue
# EXPOSED PORTS # EXPOSED PORTS
expected_exposed_ports = set((image['ContainerConfig']['ExposedPorts'] or {}).keys()) expected_exposed_ports = set((image['ContainerConfig'].get('ExposedPorts') or {}).keys())
for p in (self.exposed_ports or []): for p in (self.exposed_ports or []):
expected_exposed_ports.add("/".join(p)) expected_exposed_ports.add("/".join(p))
actually_exposed_ports = set((container["Config"]["ExposedPorts"] or {}).keys()) actually_exposed_ports = set((container["Config"].get("ExposedPorts") or {}).keys())
if actually_exposed_ports != expected_exposed_ports: if actually_exposed_ports != expected_exposed_ports:
self.reload_reasons.append('exposed_ports ({0} => {1})'.format(actually_exposed_ports, expected_exposed_ports)) self.reload_reasons.append('exposed_ports ({0} => {1})'.format(actually_exposed_ports, expected_exposed_ports))
@ -1386,6 +1392,11 @@ class DockerManager(object):
changes = list(self.client.pull(image, tag=tag, stream=True, **extra_params)) changes = list(self.client.pull(image, tag=tag, stream=True, **extra_params))
try: try:
last = changes[-1] last = changes[-1]
# seems Docker 1.8 puts an empty dict at the end of the
# stream; catch that and get the previous instead
# https://github.com/ansible/ansible-modules-core/issues/2043
if last.strip() == '{}':
last = changes[-2]
except IndexError: except IndexError:
last = '{}' last = '{}'
status = json.loads(last).get('status', '') status = json.loads(last).get('status', '')
@ -1662,7 +1673,7 @@ def main():
net = dict(default=None), net = dict(default=None),
pid = dict(default=None), pid = dict(default=None),
insecure_registry = dict(default=False, type='bool'), insecure_registry = dict(default=False, type='bool'),
log_driver = dict(default=None, choices=['json-file', 'none', 'syslog']), log_driver = dict(default=None, choices=['json-file', 'none', 'syslog', 'journald', 'gelf', 'fluentd']),
log_opt = dict(default=None, type='dict'), log_opt = dict(default=None, type='dict'),
cpu_set = dict(default=None), cpu_set = dict(default=None),
cap_add = dict(default=None, type='list'), cap_add = dict(default=None, type='list'),

@ -31,6 +31,7 @@ DOCUMENTATION = '''
module: quantum_router module: quantum_router
version_added: "1.2" version_added: "1.2"
author: "Benno Joy (@bennojoy)" author: "Benno Joy (@bennojoy)"
deprecated: Deprecated in 2.0. Use os_router instead
short_description: Create or Remove router from openstack short_description: Create or Remove router from openstack
description: description:
- Create or Delete routers from OpenStack - Create or Delete routers from OpenStack

@ -31,6 +31,7 @@ DOCUMENTATION = '''
module: quantum_router_gateway module: quantum_router_gateway
version_added: "1.2" version_added: "1.2"
author: "Benno Joy (@bennojoy)" author: "Benno Joy (@bennojoy)"
deprecated: Deprecated in 2.0. Use os_router instead
short_description: set/unset a gateway interface for the router with the specified external network short_description: set/unset a gateway interface for the router with the specified external network
description: description:
- Creates/Removes a gateway interface from the router, used to associate a external network with a router to route external traffic. - Creates/Removes a gateway interface from the router, used to associate a external network with a router to route external traffic.

@ -31,6 +31,7 @@ DOCUMENTATION = '''
module: quantum_router_interface module: quantum_router_interface
version_added: "1.2" version_added: "1.2"
author: "Benno Joy (@bennojoy)" author: "Benno Joy (@bennojoy)"
deprecated: Deprecated in 2.0. Use os_router instead
short_description: Attach/Dettach a subnet's interface to a router short_description: Attach/Dettach a subnet's interface to a router
description: description:
- Attach/Dettach a subnet interface to a router, to provide a gateway for the subnet. - Attach/Dettach a subnet interface to a router, to provide a gateway for the subnet.

@ -0,0 +1,158 @@
#!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
DOCUMENTATION = '''
module: os_image_facts
short_description: Retrieve facts about an image within OpenStack.
version_added: "2.0"
author: "Davide Agnello (@dagnello)"
description:
- Retrieve facts about a image image from OpenStack.
notes:
- Facts are placed in the C(openstack) variable.
requirements:
- "python >= 2.6"
- "shade"
options:
image:
description:
- Name or ID of the image
required: true
extends_documentation_fragment: openstack
'''
EXAMPLES = '''
# Gather facts about a previously created image named image1
- os_image_facts:
auth:
auth_url: https://your_api_url.com:9000/v2.0
username: user
password: password
project_name: someproject
image: image1
- debug: var=openstack
'''
RETURN = '''
openstack_image:
description: has all the openstack facts about the image
returned: always, but can be null
type: complex
contains:
id:
description: Unique UUID.
returned: success
type: string
name:
description: Name given to the image.
returned: success
type: string
status:
description: Image status.
returned: success
type: string
created_at:
description: Image created at timestamp.
returned: success
type: string
deleted:
description: Image deleted flag.
returned: success
type: boolean
container_format:
description: Container format of the image.
returned: success
type: string
min_ram:
description: Min amount of RAM required for this image.
returned: success
type: int
disk_format:
description: Disk format of the image.
returned: success
type: string
updated_at:
description: Image updated at timestamp.
returned: success
type: string
properties:
description: Additional properties associated with the image.
returned: success
type: dict
min_disk:
description: Min amount of disk space required for this image.
returned: success
type: int
protected:
description: Image protected flag.
returned: success
type: boolean
checksum:
description: Checksum for the image.
returned: success
type: string
owner:
description: Owner for the image.
returned: success
type: string
is_public:
description: Is plubic flag of the image.
returned: success
type: boolean
deleted_at:
description: Image deleted at timestamp.
returned: success
type: string
size:
description: Size of the image.
returned: success
type: int
'''
def main():
argument_spec = openstack_full_argument_spec(
image=dict(required=True),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec, **module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
try:
cloud = shade.openstack_cloud(**module.params)
image = cloud.get_image(module.params['image'])
module.exit_json(changed=False, ansible_facts=dict(
openstack_image=image))
except shade.OpenStackCloudException as e:
module.fail_json(msg=e.message)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()

@ -25,12 +25,12 @@ except ImportError:
DOCUMENTATION = ''' DOCUMENTATION = '''
--- ---
module: os_network module: os_network
short_description: Creates/Removes networks from OpenStack short_description: Creates/removes networks from OpenStack
extends_documentation_fragment: openstack extends_documentation_fragment: openstack
version_added: "2.0" version_added: "2.0"
author: "Monty Taylor (@emonty)" author: "Monty Taylor (@emonty)"
description: description:
- Add or Remove network from OpenStack. - Add or remove network from OpenStack.
options: options:
name: name:
description: description:
@ -46,6 +46,11 @@ options:
- Whether the state should be marked as up or down. - Whether the state should be marked as up or down.
required: false required: false
default: true default: true
external:
description:
- Whether this network is externally accessible.
required: false
default: false
state: state:
description: description:
- Indicate desired state of the resource. - Indicate desired state of the resource.
@ -56,14 +61,60 @@ requirements: ["shade"]
''' '''
EXAMPLES = ''' EXAMPLES = '''
# Create an externally accessible network named 'ext_network'.
- os_network: - os_network:
name: t1network cloud: mycloud
state: present state: present
auth: name: ext_network
auth_url: https://your_api_url.com:9000/v2.0 external: true
username: user '''
password: password
project_name: someproject RETURN = '''
network:
description: Dictionary describing the network.
returned: On success when I(state) is 'present'.
type: dictionary
contains:
id:
description: Network ID.
type: string
sample: "4bb4f9a5-3bd2-4562-bf6a-d17a6341bb56"
name:
description: Network name.
type: string
sample: "ext_network"
shared:
description: Indicates whether this network is shared across all tenants.
type: bool
sample: false
status:
description: Network status.
type: string
sample: "ACTIVE"
mtu:
description: The MTU of a network resource.
type: integer
sample: 0
admin_state_up:
description: The administrative state of the network.
type: bool
sample: true
port_security_enabled:
description: The port security status
type: bool
sample: true
router:external:
description: Indicates whether this network is externally accessible.
type: bool
sample: true
tenant_id:
description: The tenant ID.
type: string
sample: "06820f94b9f54b119636be2728d216fc"
subnets:
description: The associated subnets.
type: list
sample: []
''' '''
@ -72,6 +123,7 @@ def main():
name=dict(required=True), name=dict(required=True),
shared=dict(default=False, type='bool'), shared=dict(default=False, type='bool'),
admin_state_up=dict(default=True, type='bool'), admin_state_up=dict(default=True, type='bool'),
external=dict(default=False, type='bool'),
state=dict(default='present', choices=['absent', 'present']), state=dict(default='present', choices=['absent', 'present']),
) )
@ -85,6 +137,7 @@ def main():
name = module.params['name'] name = module.params['name']
shared = module.params['shared'] shared = module.params['shared']
admin_state_up = module.params['admin_state_up'] admin_state_up = module.params['admin_state_up']
external = module.params['external']
try: try:
cloud = shade.openstack_cloud(**module.params) cloud = shade.openstack_cloud(**module.params)
@ -92,7 +145,7 @@ def main():
if state == 'present': if state == 'present':
if not net: if not net:
net = cloud.create_network(name, shared, admin_state_up) net = cloud.create_network(name, shared, admin_state_up, external)
module.exit_json(changed=False, network=net, id=net['id']) module.exit_json(changed=False, network=net, id=net['id'])
elif state == 'absent': elif state == 'absent':
@ -109,4 +162,5 @@ def main():
# this is magic, see lib/ansible/module_common.py # this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import * from ansible.module_utils.basic import *
from ansible.module_utils.openstack import * from ansible.module_utils.openstack import *
main() if __name__ == "__main__":
main()

@ -0,0 +1,141 @@
#!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
DOCUMENTATION = '''
---
module: os_networks_facts
short_description: Retrieve facts about one or more OpenStack networks.
version_added: "2.0"
author: "Davide Agnello (@dagnello)"
description:
- Retrieve facts about one or more networks from OpenStack.
requirements:
- "python >= 2.6"
- "shade"
options:
network:
description:
- Name or ID of the Network
required: false
filters:
description:
- A dictionary of meta data to use for further filtering. Elements of
this dictionary may be additional dictionaries.
required: false
extends_documentation_fragment: openstack
'''
EXAMPLES = '''
# Gather facts about previously created networks
- os_networks_facts:
auth:
auth_url: https://your_api_url.com:9000/v2.0
username: user
password: password
project_name: someproject
- debug: var=openstack_networks
# Gather facts about a previously created network by name
- os_networks_facts:
auth:
auth_url: https://your_api_url.com:9000/v2.0
username: user
password: password
project_name: someproject
name: network1
- debug: var=openstack_networks
# Gather facts about a previously created network with filter (note: name and
filters parameters are Not mutually exclusive)
- os_networks_facts:
auth:
auth_url: https://your_api_url.com:9000/v2.0
username: user
password: password
project_name: someproject
filters:
tenant_id: 55e2ce24b2a245b09f181bf025724cbe
subnets:
- 057d4bdf-6d4d-4728-bb0f-5ac45a6f7400
- 443d4dc0-91d4-4998-b21c-357d10433483
- debug: var=openstack_networks
'''
RETURN = '''
openstack_networks:
description: has all the openstack facts about the networks
returned: always, but can be null
type: complex
contains:
id:
description: Unique UUID.
returned: success
type: string
name:
description: Name given to the network.
returned: success
type: string
status:
description: Network status.
returned: success
type: string
subnets:
description: Subnet(s) included in this network.
returned: success
type: list of strings
tenant_id:
description: Tenant id associated with this network.
returned: success
type: string
shared:
description: Network shared flag.
returned: success
type: boolean
'''
def main():
    """Entry point: search OpenStack networks and publish them as facts."""
    argument_spec = openstack_full_argument_spec(
        name=dict(required=False, default=None),
        filters=dict(required=False, default=None)
    )
    module = AnsibleModule(argument_spec)

    # shade is the OpenStack client wrapper this module is built on.
    if not HAS_SHADE:
        module.fail_json(msg='shade is required for this module')

    try:
        cloud = shade.openstack_cloud(**module.params)
        found = cloud.search_networks(module.params['name'],
                                      module.params['filters'])
        module.exit_json(changed=False,
                         ansible_facts=dict(openstack_networks=found))
    except shade.OpenStackCloudException as e:
        module.fail_json(msg=e.message)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()

@ -0,0 +1,395 @@
#!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
DOCUMENTATION = '''
---
module: os_port
short_description: Add/Update/Delete ports from an OpenStack cloud.
extends_documentation_fragment: openstack
author: "Davide Agnello (@dagnello)"
version_added: "2.0"
description:
- Add, Update or Remove ports from an OpenStack cloud. A state=present,
will ensure the port is created or updated if required.
options:
network:
description:
- Network ID or name this port belongs to.
required: true
name:
description:
- Name that has to be given to the port.
required: false
default: None
fixed_ips:
description:
- Desired IP and/or subnet for this port. Subnet is referenced by
subnet_id and IP is referenced by ip_address.
required: false
default: None
admin_state_up:
description:
- Sets admin state.
required: false
default: None
mac_address:
description:
- MAC address of this port.
required: false
default: None
security_groups:
description:
- Security group(s) ID(s) or name(s) associated with the port (comma
separated for multiple security groups - no spaces between comma(s)
or YAML list).
required: false
default: None
no_security_groups:
description:
- Do not associate a security group with this port.
required: false
default: False
allowed_address_pairs:
description:
- Allowed address pairs list. Allowed address pairs are supported with
dictionary structure.
e.g. allowed_address_pairs:
- ip_address: 10.1.0.12
mac_address: ab:cd:ef:12:34:56
- ip_address: ...
required: false
default: None
extra_dhcp_opt:
description:
- Extra dhcp options to be assigned to this port. Extra options are
supported with dictionary structure.
e.g. extra_dhcp_opt:
- opt_name: opt name1
opt_value: value1
- opt_name: ...
required: false
default: None
device_owner:
description:
- The ID of the entity that uses this port.
required: false
default: None
device_id:
description:
- Device ID of device using this port.
required: false
default: None
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
'''
EXAMPLES = '''
# Create a port
- os_port:
state: present
auth:
auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
username: admin
password: admin
project_name: admin
name: port1
network: foo
# Create a port with a static IP
- os_port:
state: present
auth:
auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
username: admin
password: admin
project_name: admin
name: port1
network: foo
fixed_ips:
- ip_address: 10.1.0.21
# Create a port with No security groups
- os_port:
state: present
auth:
auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
username: admin
password: admin
project_name: admin
name: port1
network: foo
no_security_groups: True
# Update the existing 'port1' port with multiple security groups (version 1)
- os_port:
state: present
auth:
auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/d
username: admin
password: admin
project_name: admin
name: port1
security_groups: 1496e8c7-4918-482a-9172-f4f00fc4a3a5,057d4bdf-6d4d-472...
# Update the existing 'port1' port with multiple security groups (version 2)
- os_port:
state: present
auth:
auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/d
username: admin
password: admin
project_name: admin
name: port1
security_groups:
- 1496e8c7-4918-482a-9172-f4f00fc4a3a5
- 057d4bdf-6d4d-472...
'''
RETURN = '''
id:
description: Unique UUID.
returned: success
type: string
name:
description: Name given to the port.
returned: success
type: string
network_id:
description: Network ID this port belongs in.
returned: success
type: string
security_groups:
description: Security group(s) associated with this port.
returned: success
type: list of strings
status:
description: Port's status.
returned: success
type: string
fixed_ips:
description: Fixed ip(s) associated with this port.
returned: success
type: list of dicts
tenant_id:
description: Tenant id associated with this port.
returned: success
type: string
allowed_address_pairs:
description: Allowed address pairs with this port.
returned: success
type: list of dicts
admin_state_up:
description: Admin state up flag for this port.
returned: success
type: bool
'''
def _needs_update(module, port, cloud):
"""Check for differences in the updatable values.
NOTE: We don't currently allow name updates.
"""
compare_simple = ['admin_state_up',
'mac_address',
'device_owner',
'device_id']
compare_dict = ['allowed_address_pairs',
'extra_dhcp_opt']
compare_comma_separated_list = ['security_groups']
for key in compare_simple:
if module.params[key] is not None and module.params[key] != port[key]:
return True
for key in compare_dict:
if module.params[key] is not None and cmp(module.params[key],
port[key]) != 0:
return True
for key in compare_comma_separated_list:
if module.params[key] is not None and (set(module.params[key]) !=
set(port[key])):
return True
# NOTE: if port was created or updated with 'no_security_groups=True',
# subsequent updates without 'no_security_groups' flag or
# 'no_security_groups=False' and no specified 'security_groups', will not
# result in an update to the port where the default security group is
# applied.
if module.params['no_security_groups'] and port['security_groups'] != []:
return True
if module.params['fixed_ips'] is not None:
for item in module.params['fixed_ips']:
if 'ip_address' in item:
# if ip_address in request does not match any in existing port,
# update is required.
if not any(match['ip_address'] == item['ip_address']
for match in port['fixed_ips']):
return True
if 'subnet_id' in item:
return True
for item in port['fixed_ips']:
# if ip_address in existing port does not match any in request,
# update is required.
if not any(match.get('ip_address') == item['ip_address']
for match in module.params['fixed_ips']):
return True
return False
def _system_state_change(module, port, cloud):
state = module.params['state']
if state == 'present':
if not port:
return True
return _needs_update(module, port, cloud)
if state == 'absent' and port:
return True
return False
def _compose_port_args(module, cloud):
port_kwargs = {}
optional_parameters = ['name',
'fixed_ips',
'admin_state_up',
'mac_address',
'security_groups',
'allowed_address_pairs',
'extra_dhcp_opt',
'device_owner',
'device_id']
for optional_param in optional_parameters:
if module.params[optional_param] is not None:
port_kwargs[optional_param] = module.params[optional_param]
if module.params['no_security_groups']:
port_kwargs['security_groups'] = []
return port_kwargs
def get_security_group_id(module, cloud, security_group_name_or_id):
    """Resolve a security group name or ID to its UUID, failing the module
    when no matching group exists."""
    group = cloud.get_security_group(security_group_name_or_id)
    if not group:
        module.fail_json(msg="Security group: %s, was not found"
                             % security_group_name_or_id)
    return group['id']
def main():
    """Entry point: create, update or delete a Neutron port via shade."""
    argument_spec = openstack_full_argument_spec(
        network=dict(required=False),
        name=dict(required=False),
        fixed_ips=dict(default=None),
        admin_state_up=dict(default=None),
        mac_address=dict(default=None),
        security_groups=dict(default=None),
        no_security_groups=dict(default=False, type='bool'),
        allowed_address_pairs=dict(default=None),
        extra_dhcp_opt=dict(default=None),
        device_owner=dict(default=None),
        device_id=dict(default=None),
        state=dict(default='present', choices=['absent', 'present']),
    )
    # 'no_security_groups' (strip all groups) conflicts with naming groups.
    module_kwargs = openstack_module_kwargs(
        mutually_exclusive=[
            ['no_security_groups', 'security_groups'],
        ]
    )
    module = AnsibleModule(argument_spec,
                           supports_check_mode=True,
                           **module_kwargs)
    if not HAS_SHADE:
        module.fail_json(msg='shade is required for this module')
    name = module.params['name']
    state = module.params['state']
    try:
        cloud = shade.openstack_cloud(**module.params)
        if module.params['security_groups']:
            # Accept a comma-separated string as well as a YAML list.
            if type(module.params['security_groups']) == str:
                module.params['security_groups'] = module.params[
                    'security_groups'].split(',')
            # translate security_groups to UUID's if names where provided
            # NOTE(review): map() returns a lazy iterator on Python 3, which a
            # later set() comparison in _needs_update would consume; assumes
            # this runs under Python 2 — confirm.
            module.params['security_groups'] = map(
                lambda v: get_security_group_id(module, cloud, v),
                module.params['security_groups'])
        port = None
        network_id = None
        if name:
            # Look up any existing port so we can decide create vs. update.
            port = cloud.get_port(name)
        if module.check_mode:
            # Report whether a change WOULD happen, without doing it.
            module.exit_json(changed=_system_state_change(module, port, cloud))
        changed = False
        if state == 'present':
            if not port:
                # Creating a brand-new port requires knowing its network.
                network = module.params['network']
                if not network:
                    module.fail_json(
                        msg="Parameter 'network' is required in Port Create"
                    )
                port_kwargs = _compose_port_args(module, cloud)
                network_object = cloud.get_network(network)
                if network_object:
                    network_id = network_object['id']
                else:
                    module.fail_json(
                        msg="Specified network was not found."
                    )
                port = cloud.create_port(network_id, **port_kwargs)
                changed = True
            else:
                # Port exists: only touch it when attributes actually differ.
                if _needs_update(module, port, cloud):
                    port_kwargs = _compose_port_args(module, cloud)
                    port = cloud.update_port(port['id'], **port_kwargs)
                    changed = True
            module.exit_json(changed=changed, id=port['id'], port=port)
        if state == 'absent':
            # Deleting a port that is already gone is not a change.
            if port:
                cloud.delete_port(port['id'])
                changed = True
            module.exit_json(changed=changed)
    except shade.OpenStackCloudException as e:
        module.fail_json(msg=e.message)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()

@ -0,0 +1,299 @@
#!/usr/bin/python
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
DOCUMENTATION = '''
---
module: os_router
short_description: Create or delete routers from OpenStack
extends_documentation_fragment: openstack
version_added: "2.0"
author: "David Shrewsbury (@Shrews)"
description:
- Create or Delete routers from OpenStack. Although Neutron allows
routers to share the same name, this module enforces name uniqueness
to be more user friendly.
options:
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
name:
description:
- Name to be give to the router
required: true
admin_state_up:
description:
- Desired admin state of the created or existing router.
required: false
default: true
enable_snat:
description:
- Enable Source NAT (SNAT) attribute.
required: false
default: true
network:
description:
- Unique name or ID of the external gateway network.
type: string
required: true when I(interfaces) or I(enable_snat) are provided,
false otherwise.
default: None
interfaces:
description:
- List of subnets to attach to the router. Each is a dictionary with
the subnet name or ID (subnet) and the IP address to assign on that
subnet (ip). If no IP is specified, one is automatically assigned from
that subnet.
required: false
default: None
requirements: ["shade"]
'''
EXAMPLES = '''
# Create a simple router, not attached to a gateway or subnets.
- os_router:
cloud: mycloud
state: present
name: simple_router
# Creates a router attached to ext_network1 and one subnet interface.
# An IP address from subnet1's IP range will automatically be assigned
# to that interface.
- os_router:
cloud: mycloud
state: present
name: router1
network: ext_network1
interfaces:
- subnet: subnet1
# Update existing router1 to include subnet2 (10.5.5.0/24), specifying
# the IP address within subnet2's IP range we'd like for that interface.
- os_router:
cloud: mycloud
state: present
name: router1
network: ext_network1
interfaces:
- subnet: subnet1
- subnet: subnet2
ip: 10.5.5.1
# Delete router1
- os_router:
cloud: mycloud
state: absent
name: router1
'''
RETURN = '''
router:
description: Dictionary describing the router.
returned: On success when I(state) is 'present'
type: dictionary
contains:
id:
description: Router ID.
type: string
sample: "474acfe5-be34-494c-b339-50f06aa143e4"
name:
description: Router name.
type: string
sample: "router1"
admin_state_up:
description: Administrative state of the router.
type: boolean
sample: true
status:
description: The router status.
type: string
sample: "ACTIVE"
tenant_id:
description: The tenant ID.
type: string
sample: "861174b82b43463c9edc5202aadc60ef"
external_gateway_info:
description: The external gateway parameters.
type: dictionary
sample: {
"enable_snat": true,
"external_fixed_ips": [
{
"ip_address": "10.6.6.99",
"subnet_id": "4272cb52-a456-4c20-8f3c-c26024ecfa81"
}
]
}
routes:
description: The extra routes configuration for L3 router.
type: list
'''
def _needs_update(cloud, module, router, network):
"""Decide if the given router needs an update.
"""
if router['admin_state_up'] != module.params['admin_state_up']:
return True
if router['external_gateway_info']['enable_snat'] != module.params['enable_snat']:
return True
if network:
if router['external_gateway_info']['network_id'] != network['id']:
return True
# check subnet interfaces
for new_iface in module.params['interfaces']:
subnet = cloud.get_subnet(new_iface['subnet'])
if not subnet:
module.fail_json(msg='subnet %s not found' % new_iface['subnet'])
exists = False
# compare the requested interface with existing, looking for an existing match
for existing_iface in router['external_gateway_info']['external_fixed_ips']:
if existing_iface['subnet_id'] == subnet['id']:
if 'ip' in new_iface:
if existing_iface['ip_address'] == new_iface['ip']:
# both subnet id and ip address match
exists = True
break
else:
# only the subnet was given, so ip doesn't matter
exists = True
break
# this interface isn't present on the existing router
if not exists:
return True
return False
def _system_state_change(cloud, module, router, network):
"""Check if the system state would be changed."""
state = module.params['state']
if state == 'absent' and router:
return True
if state == 'present':
if not router:
return True
return _needs_update(cloud, module, router, network)
return False
def _build_kwargs(cloud, module, router, network):
kwargs = {
'admin_state_up': module.params['admin_state_up'],
}
if router:
kwargs['name_or_id'] = router['id']
else:
kwargs['name'] = module.params['name']
if network:
kwargs['ext_gateway_net_id'] = network['id']
# can't send enable_snat unless we have a network
kwargs['enable_snat'] = module.params['enable_snat']
if module.params['interfaces']:
kwargs['ext_fixed_ips'] = []
for iface in module.params['interfaces']:
subnet = cloud.get_subnet(iface['subnet'])
if not subnet:
module.fail_json(msg='subnet %s not found' % iface['subnet'])
d = {'subnet_id': subnet['id']}
if 'ip' in iface:
d['ip_address'] = iface['ip']
kwargs['ext_fixed_ips'].append(d)
return kwargs
def main():
    """Entry point: create, update or delete a Neutron router via shade."""
    argument_spec = openstack_full_argument_spec(
        state=dict(default='present', choices=['absent', 'present']),
        name=dict(required=True),
        admin_state_up=dict(type='bool', default=True),
        enable_snat=dict(type='bool', default=True),
        network=dict(default=None),
        interfaces=dict(type='list', default=None)
    )
    module_kwargs = openstack_module_kwargs()
    module = AnsibleModule(argument_spec,
                           supports_check_mode=True,
                           **module_kwargs)
    if not HAS_SHADE:
        module.fail_json(msg='shade is required for this module')
    state = module.params['state']
    name = module.params['name']
    network = module.params['network']
    # Interfaces hang off the external gateway, so a gateway network is
    # mandatory whenever interfaces are supplied.
    if module.params['interfaces'] and not network:
        module.fail_json(msg='network is required when supplying interfaces')
    try:
        cloud = shade.openstack_cloud(**module.params)
        # Look up any existing router so we can decide create vs. update.
        router = cloud.get_router(name)
        net = None
        if network:
            net = cloud.get_network(network)
            if not net:
                module.fail_json(msg='network %s not found' % network)
        if module.check_mode:
            # Report whether a change WOULD happen, without making it.
            module.exit_json(
                changed=_system_state_change(cloud, module, router, net)
            )
        if state == 'present':
            changed = False
            if not router:
                kwargs = _build_kwargs(cloud, module, router, net)
                router = cloud.create_router(**kwargs)
                changed = True
            else:
                # Only touch the router when its attributes actually differ.
                if _needs_update(cloud, module, router, net):
                    kwargs = _build_kwargs(cloud, module, router, net)
                    router = cloud.update_router(**kwargs)
                    changed = True
            module.exit_json(changed=changed, router=router)
        elif state == 'absent':
            # Deleting a router that is already gone is not a change.
            if not router:
                module.exit_json(changed=False)
            else:
                cloud.delete_router(name)
                module.exit_json(changed=True)
    except shade.OpenStackCloudException as e:
        module.fail_json(msg=e.message)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()

@ -82,7 +82,10 @@ options:
nics: nics:
description: description:
- A list of networks to which the instance's interface should - A list of networks to which the instance's interface should
be attached. Networks may be referenced by net-id or net-name. be attached. Networks may be referenced by net-id/net-name/port-id
or port-name.
- 'Also this accepts a string containing a list of net-id/port-id.
Eg: nics: "net-id=uuid-1,net-id=uuid-2"'
required: false required: false
default: None default: None
public_ip: public_ip:
@ -108,7 +111,8 @@ options:
meta: meta:
description: description:
- A list of key value pairs that should be provided as a metadata to - A list of key value pairs that should be provided as a metadata to
the new instance. the new instance or a string containing a list of key-value pairs.
Eg: meta: "key1=value1,key2=value2"
required: false required: false
default: None default: None
wait: wait:
@ -241,6 +245,44 @@ EXAMPLES = '''
image: Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM) image: Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM)
flavor_ram: 4096 flavor_ram: 4096
flavor_include: Performance flavor_include: Performance
# Creates a new instance and attaches to multiple network
- name: launch a compute instance
hosts: localhost
tasks:
- name: launch an instance with a string
os_server:
name: vm1
auth:
auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
username: admin
password: admin
project_name: admin
name: vm1
image: 4f905f38-e52a-43d2-b6ec-754a13ffb529
key_name: ansible_key
timeout: 200
flavor: 4
nics: "net-id=4cb08b20-62fe-11e5-9d70-feff819cdc9f,net-id=542f0430-62fe-11e5-9d70-feff819cdc9f..."
# Creates a new instance and attaches to a network and passes metadata to
# the instance
- os_server:
state: present
auth:
auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
username: admin
password: admin
project_name: admin
name: vm1
image: 4f905f38-e52a-43d2-b6ec-754a13ffb529
key_name: ansible_key
timeout: 200
flavor: 4
nics:
- net-id: 34605f38-e52a-25d2-b6ec-754a13ffb723
- net-name: another_network
meta: "hostname=test1,group=uge_master"
''' '''
@ -252,6 +294,14 @@ def _exit_hostvars(module, cloud, server, changed=True):
def _network_args(module, cloud): def _network_args(module, cloud):
args = [] args = []
nics = module.params['nics']
if type(nics) == str :
for kv_str in nics.split(","):
nic = {}
k, v = kv_str.split("=")
nic[k] = v
args.append(nic)
else:
for net in module.params['nics']: for net in module.params['nics']:
if net.get('net-id'): if net.get('net-id'):
args.append(net) args.append(net)
@ -305,6 +355,13 @@ def _create_server(module, cloud):
nics = _network_args(module, cloud) nics = _network_args(module, cloud)
if type(module.params['meta']) is str:
metas = {}
for kv_str in module.params['meta'].split(","):
k, v = kv_str.split("=")
metas[k] = v
module.params['meta'] = metas
bootkwargs = dict( bootkwargs = dict(
name=module.params['name'], name=module.params['name'],
image=image_id, image=image_id,

@ -112,23 +112,23 @@ requirements:
EXAMPLES = ''' EXAMPLES = '''
# Create a new (or update an existing) subnet on the specified network # Create a new (or update an existing) subnet on the specified network
- os_subnet: - os_subnet:
state=present state: present
network_name=network1 network_name: network1
name=net1subnet name: net1subnet
cidr=192.168.0.0/24 cidr: 192.168.0.0/24
dns_nameservers: dns_nameservers:
- 8.8.8.7 - 8.8.8.7
- 8.8.8.8 - 8.8.8.8
host_routes: host_routes:
- destination: 0.0.0.0/0 - destination: 0.0.0.0/0
nexthop: 123.456.78.9 nexthop: 12.34.56.78
- destination: 192.168.0.0/24 - destination: 192.168.0.0/24
nexthop: 192.168.0.1 nexthop: 192.168.0.1
# Delete a subnet # Delete a subnet
- os_subnet: - os_subnet:
state=absent state: absent
name=net1subnet name: net1subnet
# Create an ipv6 stateless subnet # Create an ipv6 stateless subnet
- os_subnet: - os_subnet:

@ -0,0 +1,152 @@
#!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
DOCUMENTATION = '''
---
module: os_subnets_facts
short_description: Retrieve facts about one or more OpenStack subnets.
version_added: "2.0"
author: "Davide Agnello (@dagnello)"
description:
- Retrieve facts about one or more subnets from OpenStack.
requirements:
- "python >= 2.6"
- "shade"
options:
subnet:
description:
- Name or ID of the subnet
required: false
filters:
description:
- A dictionary of meta data to use for further filtering. Elements of
this dictionary may be additional dictionaries.
required: false
extends_documentation_fragment: openstack
'''
EXAMPLES = '''
# Gather facts about previously created subnets
- os_subnets_facts:
auth:
auth_url: https://your_api_url.com:9000/v2.0
username: user
password: password
project_name: someproject
- debug: var=openstack_subnets
# Gather facts about a previously created subnet by name
- os_subnets_facts:
auth:
auth_url: https://your_api_url.com:9000/v2.0
username: user
password: password
project_name: someproject
name: subnet1
- debug: var=openstack_subnets
# Gather facts about a previously created subnet with filter (note: name and
filters parameters are Not mutually exclusive)
- os_subnets_facts:
auth:
auth_url: https://your_api_url.com:9000/v2.0
username: user
password: password
project_name: someproject
filters:
tenant_id: 55e2ce24b2a245b09f181bf025724cbe
- debug: var=openstack_subnets
'''
RETURN = '''
This module registers subnet details in facts named: openstack_subnets. If a
subnet name/id and or filter does not result in a subnet found, an empty list
is set in openstack_subnets.
id:
description: Unique UUID.
returned: success
type: string
name:
description: Name given to the subnet.
returned: success
type: string
network_id:
description: Network ID this subnet belongs in.
returned: success
type: string
cidr:
description: Subnet's CIDR.
returned: success
type: string
gateway_ip:
description: Subnet's gateway ip.
returned: success
type: string
enable_dhcp:
description: DHCP enable flag for this subnet.
returned: success
type: bool
ip_version:
description: IP version for this subnet.
returned: success
type: int
tenant_id:
description: Tenant id associated with this subnet.
returned: success
type: string
dns_nameservers:
description: DNS name servers for this subnet.
returned: success
type: list of strings
allocation_pools:
description: Allocation pools associated with this subnet.
returned: success
type: list of dicts
'''
def main():
    """Entry point: search OpenStack subnets and publish them as facts."""
    argument_spec = openstack_full_argument_spec(
        name=dict(required=False, default=None),
        filters=dict(required=False, default=None)
    )
    module = AnsibleModule(argument_spec)

    # shade is the OpenStack client wrapper this module is built on.
    if not HAS_SHADE:
        module.fail_json(msg='shade is required for this module')

    try:
        cloud = shade.openstack_cloud(**module.params)
        found = cloud.search_subnets(module.params['name'],
                                     module.params['filters'])
        module.exit_json(changed=False,
                         ansible_facts=dict(openstack_subnets=found))
    except shade.OpenStackCloudException as e:
        module.fail_json(msg=e.message)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()

@ -38,6 +38,16 @@ options:
description: description:
- Volume size of the database 1-150GB - Volume size of the database 1-150GB
default: 2 default: 2
cdb_type:
description:
- type of instance (i.e. MySQL, MariaDB, Percona)
default: MySQL
version_added: "2.0"
cdb_version:
description:
- version of database (MySQL supports 5.1 and 5.6, MariaDB supports 10, Percona supports 5.6)
choices: ['5.1', '5.6', '10']
version_added: "2.0"
state: state:
description: description:
- Indicate desired state of the resource - Indicate desired state of the resource
@ -68,6 +78,8 @@ EXAMPLES = '''
name: db-server1 name: db-server1
flavor: 1 flavor: 1
volume: 2 volume: 2
cdb_type: MySQL
cdb_version: 5.6
wait: yes wait: yes
state: present state: present
register: rax_db_server register: rax_db_server
@ -91,10 +103,12 @@ def find_instance(name):
return False return False
def save_instance(module, name, flavor, volume, wait, wait_timeout): def save_instance(module, name, flavor, volume, cdb_type, cdb_version, wait,
wait_timeout):
for arg, value in dict(name=name, flavor=flavor, for arg, value in dict(name=name, flavor=flavor,
volume=volume).iteritems(): volume=volume, type=cdb_type, version=cdb_version
).iteritems():
if not value: if not value:
module.fail_json(msg='%s is required for the "rax_cdb"' module.fail_json(msg='%s is required for the "rax_cdb"'
' module' % arg) ' module' % arg)
@ -118,7 +132,8 @@ def save_instance(module, name, flavor, volume, wait, wait_timeout):
if not instance: if not instance:
action = 'create' action = 'create'
try: try:
instance = cdb.create(name=name, flavor=flavor, volume=volume) instance = cdb.create(name=name, flavor=flavor, volume=volume,
type=cdb_type, version=cdb_version)
except Exception, e: except Exception, e:
module.fail_json(msg='%s' % e.message) module.fail_json(msg='%s' % e.message)
else: else:
@ -189,11 +204,13 @@ def delete_instance(module, name, wait, wait_timeout):
cdb=rax_to_dict(instance)) cdb=rax_to_dict(instance))
def rax_cdb(module, state, name, flavor, volume, wait, wait_timeout): def rax_cdb(module, state, name, flavor, volume, cdb_type, cdb_version, wait,
wait_timeout):
# act on the state # act on the state
if state == 'present': if state == 'present':
save_instance(module, name, flavor, volume, wait, wait_timeout) save_instance(module, name, flavor, volume, cdb_type, cdb_version, wait,
wait_timeout)
elif state == 'absent': elif state == 'absent':
delete_instance(module, name, wait, wait_timeout) delete_instance(module, name, wait, wait_timeout)
@ -205,6 +222,8 @@ def main():
name=dict(type='str', required=True), name=dict(type='str', required=True),
flavor=dict(type='int', default=1), flavor=dict(type='int', default=1),
volume=dict(type='int', default=2), volume=dict(type='int', default=2),
cdb_type=dict(type='str', default='MySQL'),
cdb_version=dict(type='str', default='5.6'),
state=dict(default='present', choices=['present', 'absent']), state=dict(default='present', choices=['present', 'absent']),
wait=dict(type='bool', default=False), wait=dict(type='bool', default=False),
wait_timeout=dict(type='int', default=300), wait_timeout=dict(type='int', default=300),
@ -222,12 +241,14 @@ def main():
name = module.params.get('name') name = module.params.get('name')
flavor = module.params.get('flavor') flavor = module.params.get('flavor')
volume = module.params.get('volume') volume = module.params.get('volume')
cdb_type = module.params.get('type')
cdb_version = module.params.get('version')
state = module.params.get('state') state = module.params.get('state')
wait = module.params.get('wait') wait = module.params.get('wait')
wait_timeout = module.params.get('wait_timeout') wait_timeout = module.params.get('wait_timeout')
setup_rax_module(module, pyrax) setup_rax_module(module, pyrax)
rax_cdb(module, state, name, flavor, volume, wait, wait_timeout) rax_cdb(module, state, name, flavor, volume, cdb_type, cdb_version, wait, wait_timeout)
# import module snippets # import module snippets

@ -271,7 +271,7 @@ def upload(module, cf, container, src, dest, meta, expires):
if path != src: if path != src:
prefix = path.split(src)[-1].lstrip('/') prefix = path.split(src)[-1].lstrip('/')
filenames = [os.path.join(prefix, name) for name in filenames filenames = [os.path.join(prefix, name) for name in filenames
if not os.path.isdir(name)] if not os.path.isdir(os.path.join(path, name))]
objs += filenames objs += filenames
_objs = [] _objs = []

@ -105,6 +105,18 @@ options:
- Data to be uploaded to the servers config drive. This option implies - Data to be uploaded to the servers config drive. This option implies
I(config_drive). Can be a file path or a string I(config_drive). Can be a file path or a string
version_added: 1.8 version_added: 1.8
wait:
description:
- wait for the scaling group to finish provisioning the minimum amount of
servers
default: "no"
choices:
- "yes"
- "no"
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 300
author: "Matt Martz (@sivel)" author: "Matt Martz (@sivel)"
extends_documentation_fragment: rackspace extends_documentation_fragment: rackspace
''' '''
@ -144,7 +156,7 @@ def rax_asg(module, cooldown=300, disk_config=None, files={}, flavor=None,
image=None, key_name=None, loadbalancers=[], meta={}, image=None, key_name=None, loadbalancers=[], meta={},
min_entities=0, max_entities=0, name=None, networks=[], min_entities=0, max_entities=0, name=None, networks=[],
server_name=None, state='present', user_data=None, server_name=None, state='present', user_data=None,
config_drive=False): config_drive=False, wait=True, wait_timeout=300):
changed = False changed = False
au = pyrax.autoscale au = pyrax.autoscale
@ -315,6 +327,16 @@ def rax_asg(module, cooldown=300, disk_config=None, files={}, flavor=None,
sg.get() sg.get()
if wait:
end_time = time.time() + wait_timeout
infinite = wait_timeout == 0
while infinite or time.time() < end_time:
state = sg.get_state()
if state["pending_capacity"] == 0:
break
time.sleep(5)
module.exit_json(changed=changed, autoscale_group=rax_to_dict(sg)) module.exit_json(changed=changed, autoscale_group=rax_to_dict(sg))
else: else:
@ -350,6 +372,8 @@ def main():
server_name=dict(required=True), server_name=dict(required=True),
state=dict(default='present', choices=['present', 'absent']), state=dict(default='present', choices=['present', 'absent']),
user_data=dict(no_log=True), user_data=dict(no_log=True),
wait=dict(default=False, type='bool'),
wait_timeout=dict(default=300),
) )
) )

@ -77,6 +77,13 @@ options:
already existed. already existed.
required: false required: false
version_added: "1.5" version_added: "1.5"
remote_src:
description:
- If False, it will search for src at originating/master machine, if True it will go to the remote/target machine for the src. Default is False.
choices: [ "True", "False" ]
required: false
default: "False"
version_added: "2.0"
extends_documentation_fragment: extends_documentation_fragment:
- files - files
- validate - validate

@ -93,10 +93,10 @@ EXAMPLES = '''
# change file ownership, group and mode. When specifying mode using octal numbers, first digit should always be 0. # change file ownership, group and mode. When specifying mode using octal numbers, first digit should always be 0.
- file: path=/etc/foo.conf owner=foo group=foo mode=0644 - file: path=/etc/foo.conf owner=foo group=foo mode=0644
- file: src=/file/to/link/to dest=/path/to/symlink owner=foo group=foo state=link - file: src=/file/to/link/to dest=/path/to/symlink owner=foo group=foo state=link
- file: src=/tmp/{{ item.path }} dest={{ item.dest }} state=link - file: src=/tmp/{{ item.src }} dest={{ item.dest }} state=link
with_items: with_items:
- { path: 'x', dest: 'y' } - { src: 'x', dest: 'y' }
- { path: 'z', dest: 'k' } - { src: 'z', dest: 'k' }
# touch a file, using symbolic modes to set the permissions (equivalent to 0644) # touch a file, using symbolic modes to set the permissions (equivalent to 0644)
- file: path=/etc/foo.conf state=touch mode="u=rw,g=r,o=r" - file: path=/etc/foo.conf state=touch mode="u=rw,g=r,o=r"

@ -205,9 +205,10 @@ synchronize: mode=pull src=some/relative/path dest=/some/absolute/path
# Synchronization of src on delegate host to dest on the current inventory host. # Synchronization of src on delegate host to dest on the current inventory host.
# If delegate_to is set to the current inventory host, this can be used to synchronize # If delegate_to is set to the current inventory host, this can be used to synchronize
# two directories on that host. # two directories on that host.
synchronize: > synchronize:
src=some/relative/path dest=/some/absolute/path src: some/relative/path
delegate_to: delegate.host dest: /some/absolute/path
delegate_to: delegate.host
# Synchronize and delete files in dest on the remote host that are not found in src of localhost. # Synchronize and delete files in dest on the remote host that are not found in src of localhost.
synchronize: src=some/relative/path dest=/some/absolute/path delete=yes synchronize: src=some/relative/path dest=/some/absolute/path delete=yes
@ -222,7 +223,12 @@ synchronize: src=some/relative/path dest=/some/absolute/path rsync_path="sudo rs
+ /var/conf # include /var/conf even though it was previously excluded + /var/conf # include /var/conf even though it was previously excluded
# Synchronize passing in extra rsync options # Synchronize passing in extra rsync options
synchronize: src=/tmp/helloworld dest=/var/www/helloword rsync_opts=--no-motd,--exclude=.git synchronize:
src: /tmp/helloworld
dest: /var/www/helloword
rsync_opts:
- "--no-motd"
- "--exclude=.git"
''' '''

@ -84,7 +84,11 @@ options:
Format: <algorithm>:<checksum>, e.g.: checksum="sha256:D98291AC[...]B6DC7B97" Format: <algorithm>:<checksum>, e.g.: checksum="sha256:D98291AC[...]B6DC7B97"
If you worry about portability, only the sha1 algorithm is available If you worry about portability, only the sha1 algorithm is available
on all platforms and python versions. The third party hashlib on all platforms and python versions. The third party hashlib
library can be installed for access to additional algorithms.' library can be installed for access to additional algorithms.
Additionaly, if a checksum is passed to this parameter, and the file exist under
the C(dest) location, the destination_checksum would be calculated, and if
checksum equals destination_checksum, the file download would be skipped
(unless C(force) is true). '
version_added: "2.0" version_added: "2.0"
required: false required: false
default: null default: null

@ -239,7 +239,7 @@ def main():
virtualenv_python=dict(default=None, required=False, type='str'), virtualenv_python=dict(default=None, required=False, type='str'),
use_mirrors=dict(default='yes', type='bool'), use_mirrors=dict(default='yes', type='bool'),
extra_args=dict(default=None, required=False), extra_args=dict(default=None, required=False),
chdir=dict(default=None, required=False), chdir=dict(default=None, required=False, type='path'),
executable=dict(default=None, required=False), executable=dict(default=None, required=False),
), ),
required_one_of=[['name', 'requirements']], required_one_of=[['name', 'requirements']],
@ -258,6 +258,10 @@ def main():
if state == 'latest' and version is not None: if state == 'latest' and version is not None:
module.fail_json(msg='version is incompatible with state=latest') module.fail_json(msg='version is incompatible with state=latest')
if chdir is None:
# this is done to avoid permissions issues with privilege escalation and virtualenvs
chdir = tempfile.gettempdir()
err = '' err = ''
out = '' out = ''
@ -285,10 +289,7 @@ def main():
cmd += ' -p%s' % virtualenv_python cmd += ' -p%s' % virtualenv_python
cmd = "%s %s" % (cmd, env) cmd = "%s %s" % (cmd, env)
this_dir = tempfile.gettempdir() rc, out_venv, err_venv = module.run_command(cmd, cwd=chdir)
if chdir:
this_dir = os.path.join(this_dir, chdir)
rc, out_venv, err_venv = module.run_command(cmd, cwd=this_dir)
out += out_venv out += out_venv
err += err_venv err += err_venv
if rc != 0: if rc != 0:
@ -328,9 +329,6 @@ def main():
elif requirements: elif requirements:
cmd += ' -r %s' % requirements cmd += ' -r %s' % requirements
this_dir = tempfile.gettempdir()
if chdir:
this_dir = os.path.join(this_dir, chdir)
if module.check_mode: if module.check_mode:
if extra_args or requirements or state == 'latest' or not name: if extra_args or requirements or state == 'latest' or not name:
@ -340,7 +338,8 @@ def main():
module.exit_json(changed=True) module.exit_json(changed=True)
freeze_cmd = '%s freeze' % pip freeze_cmd = '%s freeze' % pip
rc, out_pip, err_pip = module.run_command(freeze_cmd, cwd=this_dir)
rc, out_pip, err_pip = module.run_command(freeze_cmd, cwd=chdir)
if rc != 0: if rc != 0:
module.exit_json(changed=True) module.exit_json(changed=True)
@ -353,7 +352,7 @@ def main():
changed = (state == 'present' and not is_present) or (state == 'absent' and is_present) changed = (state == 'present' and not is_present) or (state == 'absent' and is_present)
module.exit_json(changed=changed, cmd=freeze_cmd, stdout=out, stderr=err) module.exit_json(changed=changed, cmd=freeze_cmd, stdout=out, stderr=err)
rc, out_pip, err_pip = module.run_command(cmd, path_prefix=path_prefix, cwd=this_dir) rc, out_pip, err_pip = module.run_command(cmd, path_prefix=path_prefix, cwd=chdir)
out += out_pip out += out_pip
err += err_pip err += err_pip
if rc == 1 and state == 'absent' and \ if rc == 1 and state == 'absent' and \

@ -7,7 +7,7 @@ short_description: Manage Red Hat Network registration and subscriptions using t
description: description:
- Manage registration and subscription to the Red Hat Network entitlement platform. - Manage registration and subscription to the Red Hat Network entitlement platform.
version_added: "1.2" version_added: "1.2"
author: "James Laska (@jlaska)" author: "Barnaby Court (@barnabycourt)"
notes: notes:
- In order to register a system, subscription-manager requires either a username and password, or an activationkey. - In order to register a system, subscription-manager requires either a username and password, or an activationkey.
requirements: requirements:

@ -61,7 +61,6 @@ EXAMPLES = '''
- rpm_key: state=absent key=DEADB33F - rpm_key: state=absent key=DEADB33F
''' '''
import re import re
import syslog
import os.path import os.path
import urllib2 import urllib2
import tempfile import tempfile
@ -74,7 +73,6 @@ def is_pubkey(string):
class RpmKey: class RpmKey:
def __init__(self, module): def __init__(self, module):
self.syslogging = False
# If the key is a url, we need to check if it's present to be idempotent, # If the key is a url, we need to check if it's present to be idempotent,
# to do that, we need to check the keyid, which we can get from the armor. # to do that, we need to check the keyid, which we can get from the armor.
keyfile = None keyfile = None
@ -163,9 +161,6 @@ class RpmKey:
return re.match('(0x)?[0-9a-f]{8}', keystr, flags=re.IGNORECASE) return re.match('(0x)?[0-9a-f]{8}', keystr, flags=re.IGNORECASE)
def execute_command(self, cmd): def execute_command(self, cmd):
if self.syslogging:
syslog.openlog('ansible-%s' % os.path.basename(__file__))
syslog.syslog(syslog.LOG_NOTICE, 'Command %s' % '|'.join(cmd))
rc, stdout, stderr = self.module.run_command(cmd) rc, stdout, stderr = self.module.run_command(cmd)
if rc != 0: if rc != 0:
self.module.fail_json(msg=stderr) self.module.fail_json(msg=stderr)

@ -26,7 +26,6 @@ import traceback
import os import os
import yum import yum
import rpm import rpm
import syslog
import platform import platform
import tempfile import tempfile
import shutil import shutil
@ -169,10 +168,6 @@ BUFSIZE = 65536
def_qf = "%{name}-%{version}-%{release}.%{arch}" def_qf = "%{name}-%{version}-%{release}.%{arch}"
def log(msg):
syslog.openlog('ansible-yum', 0, syslog.LOG_USER)
syslog.syslog(syslog.LOG_NOTICE, msg)
def yum_base(conf_file=None): def yum_base(conf_file=None):
my = yum.YumBase() my = yum.YumBase()
@ -760,7 +755,11 @@ def latest(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos):
if update_all: if update_all:
cmd = yum_basecmd + ['update'] cmd = yum_basecmd + ['update']
will_update = set(updates.keys())
will_update_from_other_package = dict()
else: else:
will_update = set()
will_update_from_other_package = dict()
for spec in items: for spec in items:
# some guess work involved with groups. update @<group> will install the group if missing # some guess work involved with groups. update @<group> will install the group if missing
if spec.startswith('@'): if spec.startswith('@'):
@ -784,8 +783,19 @@ def latest(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos):
nothing_to_do = False nothing_to_do = False
break break
if spec in pkgs['update'] and spec in updates.keys(): # this contains the full NVR and spec could contain wildcards
# or virtual provides (like "python-*" or "smtp-daemon") while
# updates contains name only.
this_name_only = '-'.join(this.split('-')[:-2])
if spec in pkgs['update'] and this_name_only in updates.keys():
nothing_to_do = False nothing_to_do = False
will_update.add(spec)
# Massage the updates list
if spec != this_name_only:
# For reporting what packages would be updated more
# succinctly
will_update_from_other_package[spec] = this_name_only
break
if nothing_to_do: if nothing_to_do:
res['results'].append("All packages providing %s are up to date" % spec) res['results'].append("All packages providing %s are up to date" % spec)
@ -798,12 +808,6 @@ def latest(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos):
res['msg'] += "The following packages have pending transactions: %s" % ", ".join(conflicts) res['msg'] += "The following packages have pending transactions: %s" % ", ".join(conflicts)
module.fail_json(**res) module.fail_json(**res)
# list of package updates
if update_all:
will_update = updates.keys()
else:
will_update = [u for u in pkgs['update'] if u in updates.keys() or u.startswith('@')]
# check_mode output # check_mode output
if module.check_mode: if module.check_mode:
to_update = [] to_update = []
@ -811,6 +815,9 @@ def latest(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos):
if w.startswith('@'): if w.startswith('@'):
to_update.append((w, None)) to_update.append((w, None))
msg = '%s will be updated' % w msg = '%s will be updated' % w
elif w not in updates:
other_pkg = will_update_from_other_package[w]
to_update.append((w, 'because of (at least) %s-%s.%s from %s' % (other_pkg, updates[other_pkg]['version'], updates[other_pkg]['dist'], updates[other_pkg]['repo'])))
else: else:
to_update.append((w, '%s.%s from %s' % (updates[w]['version'], updates[w]['dist'], updates[w]['repo']))) to_update.append((w, '%s.%s from %s' % (updates[w]['version'], updates[w]['dist'], updates[w]['repo'])))

@ -453,7 +453,7 @@ def is_local_branch(git_path, module, dest, branch):
def is_not_a_branch(git_path, module, dest): def is_not_a_branch(git_path, module, dest):
branches = get_branches(git_path, module, dest) branches = get_branches(git_path, module, dest)
for b in branches: for b in branches:
if b.startswith('* ') and 'no branch' in b: if b.startswith('* ') and ('no branch' in b or 'detached from' in b):
return True return True
return False return False

@ -171,9 +171,10 @@ class Subversion(object):
'''True if revisioned files have been added or modified. Unrevisioned files are ignored.''' '''True if revisioned files have been added or modified. Unrevisioned files are ignored.'''
lines = self._exec(["status", "--quiet", "--ignore-externals", self.dest]) lines = self._exec(["status", "--quiet", "--ignore-externals", self.dest])
# The --quiet option will return only modified files. # The --quiet option will return only modified files.
# Match only revisioned files, i.e. ignore status '?'.
regex = re.compile(r'^[^?X]')
# Has local mods if more than 0 modifed revisioned files. # Has local mods if more than 0 modifed revisioned files.
return len(filter(len, lines)) > 0 return len(filter(regex.match, lines)) > 0
def needs_update(self): def needs_update(self):
curr, url = self.get_revision() curr, url = self.get_revision()

@ -67,6 +67,7 @@ options:
cron_file: cron_file:
description: description:
- If specified, uses this file in cron.d instead of an individual user's crontab. - If specified, uses this file in cron.d instead of an individual user's crontab.
To use the C(cron_file) parameter you must specify the C(user) as well.
required: false required: false
default: null default: null
backup: backup:
@ -178,9 +179,6 @@ class CronTab(object):
self.lines = None self.lines = None
self.ansible = "#Ansible: " self.ansible = "#Ansible: "
# select whether we dump additional debug info through syslog
self.syslogging = False
if cron_file: if cron_file:
self.cron_file = '/etc/cron.d/%s' % cron_file self.cron_file = '/etc/cron.d/%s' % cron_file
else: else:
@ -218,10 +216,6 @@ class CronTab(object):
self.lines.append(l) self.lines.append(l)
count += 1 count += 1
def log_message(self, message):
if self.syslogging:
syslog.syslog(syslog.LOG_NOTICE, 'ansible: "%s"' % message)
def is_empty(self): def is_empty(self):
if len(self.lines) == 0: if len(self.lines) == 0:
return True return True
@ -458,9 +452,7 @@ def main():
os.umask(022) os.umask(022)
crontab = CronTab(module, user, cron_file) crontab = CronTab(module, user, cron_file)
if crontab.syslogging: module.debug('cron instantiated - name: "%s"' % name)
syslog.openlog('ansible-%s' % os.path.basename(__file__))
syslog.syslog(syslog.LOG_NOTICE, 'cron instantiated - name: "%s"' % name)
# --- user input validation --- # --- user input validation ---
@ -495,6 +487,7 @@ def main():
(backuph, backup_file) = tempfile.mkstemp(prefix='crontab') (backuph, backup_file) = tempfile.mkstemp(prefix='crontab')
crontab.write(backup_file) crontab.write(backup_file)
if crontab.cron_file and not name and not do_install: if crontab.cron_file and not name and not do_install:
changed = crontab.remove_job_file() changed = crontab.remove_job_file()
module.exit_json(changed=changed,cron_file=cron_file,state=state) module.exit_json(changed=changed,cron_file=cron_file,state=state)

@ -57,7 +57,6 @@ EXAMPLES = '''
''' '''
import grp import grp
import syslog
import platform import platform
class Group(object): class Group(object):
@ -86,13 +85,8 @@ class Group(object):
self.name = module.params['name'] self.name = module.params['name']
self.gid = module.params['gid'] self.gid = module.params['gid']
self.system = module.params['system'] self.system = module.params['system']
self.syslogging = False
def execute_command(self, cmd): def execute_command(self, cmd):
if self.syslogging:
syslog.openlog('ansible-%s' % os.path.basename(__file__))
syslog.syslog(syslog.LOG_NOTICE, 'Command %s' % '|'.join(cmd))
return self.module.run_command(cmd) return self.module.run_command(cmd)
def group_del(self): def group_del(self):
@ -395,11 +389,9 @@ def main():
group = Group(module) group = Group(module)
if group.syslogging: module.debug('Group instantiated - platform %s' % group.platform)
syslog.openlog('ansible-%s' % os.path.basename(__file__)) if group.distribution:
syslog.syslog(syslog.LOG_NOTICE, 'Group instantiated - platform %s' % group.platform) module.debug('Group instantiated - distribution %s' % group.distribution)
if user.distribution:
syslog.syslog(syslog.LOG_NOTICE, 'Group instantiated - distribution %s' % group.distribution)
rc = None rc = None
out = '' out = ''

@ -74,14 +74,6 @@ options:
description: description:
- Additional arguments provided on the command line - Additional arguments provided on the command line
aliases: [ 'args' ] aliases: [ 'args' ]
must_exist:
required: false
default: true
version_added: "2.0"
description:
- Avoid a module failure if the named service does not exist. Useful
for opportunistically starting/stopping/restarting a list of
potential services.
''' '''
EXAMPLES = ''' EXAMPLES = '''
@ -106,8 +98,6 @@ EXAMPLES = '''
# Example action to restart network service for interface eth0 # Example action to restart network service for interface eth0
- service: name=network state=restarted args=eth0 - service: name=network state=restarted args=eth0
# Example action to restart nova-compute if it exists
- service: name=nova-compute state=restarted must_exist=no
''' '''
import platform import platform
@ -169,9 +159,6 @@ class Service(object):
self.rcconf_value = None self.rcconf_value = None
self.svc_change = False self.svc_change = False
# select whether we dump additional debug info through syslog
self.syslogging = False
# =========================================== # ===========================================
# Platform specific methods (must be replaced by subclass). # Platform specific methods (must be replaced by subclass).
@ -191,9 +178,6 @@ class Service(object):
# Generic methods that should be used on all platforms. # Generic methods that should be used on all platforms.
def execute_command(self, cmd, daemonize=False): def execute_command(self, cmd, daemonize=False):
if self.syslogging:
syslog.openlog('ansible-%s' % os.path.basename(__file__))
syslog.syslog(syslog.LOG_NOTICE, 'Command %s, daemonize %r' % (cmd, daemonize))
# Most things don't need to be daemonized # Most things don't need to be daemonized
if not daemonize: if not daemonize:
@ -481,9 +465,6 @@ class LinuxService(Service):
self.enable_cmd = location['chkconfig'] self.enable_cmd = location['chkconfig']
if self.enable_cmd is None: if self.enable_cmd is None:
if self.module.params['must_exist']:
self.module.fail_json(msg="no service or tool found for: %s" % self.name)
else:
# exiting without change on non-existent service # exiting without change on non-existent service
self.module.exit_json(changed=False, exists=False) self.module.exit_json(changed=False, exists=False)
@ -493,10 +474,6 @@ class LinuxService(Service):
# couldn't find anything yet # couldn't find anything yet
if self.svc_cmd is None and not self.svc_initscript: if self.svc_cmd is None and not self.svc_initscript:
if self.module.params['must_exist']:
self.module.fail_json(msg='cannot find \'service\' binary or init script for service, possible typo in service name?, aborting')
else:
# exiting without change on non-existent service
self.module.exit_json(changed=False, exists=False) self.module.exit_json(changed=False, exists=False)
if location.get('initctl', False): if location.get('initctl', False):
@ -1442,7 +1419,6 @@ def main():
enabled = dict(type='bool'), enabled = dict(type='bool'),
runlevel = dict(required=False, default='default'), runlevel = dict(required=False, default='default'),
arguments = dict(aliases=['args'], default=''), arguments = dict(aliases=['args'], default=''),
must_exist = dict(type='bool', default=True),
), ),
supports_check_mode=True supports_check_mode=True
) )
@ -1451,11 +1427,9 @@ def main():
service = Service(module) service = Service(module)
if service.syslogging: module.debug('Service instantiated - platform %s' % service.platform)
syslog.openlog('ansible-%s' % os.path.basename(__file__))
syslog.syslog(syslog.LOG_NOTICE, 'Service instantiated - platform %s' % service.platform)
if service.distribution: if service.distribution:
syslog.syslog(syslog.LOG_NOTICE, 'Service instantiated - distribution %s' % service.distribution) module.debug('Service instantiated - distribution %s' % service.distribution)
rc = 0 rc = 0
out = '' out = ''
@ -1527,4 +1501,5 @@ def main():
module.exit_json(**result) module.exit_json(**result)
from ansible.module_utils.basic import * from ansible.module_utils.basic import *
main() main()

@ -123,6 +123,8 @@ class SysctlModule(object):
def process(self): def process(self):
self.platform = get_platform().lower()
# Whitespace is bad # Whitespace is bad
self.args['name'] = self.args['name'].strip() self.args['name'] = self.args['name'].strip()
self.args['value'] = self._parse_value(self.args['value']) self.args['value'] = self._parse_value(self.args['value'])
@ -206,6 +208,10 @@ class SysctlModule(object):
# Use the sysctl command to find the current value # Use the sysctl command to find the current value
def get_token_curr_value(self, token): def get_token_curr_value(self, token):
if self.platform == 'openbsd':
# openbsd doesn't support -e, just drop it
thiscmd = "%s -n %s" % (self.sysctl_cmd, token)
else:
thiscmd = "%s -e -n %s" % (self.sysctl_cmd, token) thiscmd = "%s -e -n %s" % (self.sysctl_cmd, token)
rc,out,err = self.module.run_command(thiscmd) rc,out,err = self.module.run_command(thiscmd)
if rc != 0: if rc != 0:
@ -217,6 +223,10 @@ class SysctlModule(object):
def set_token_value(self, token, value): def set_token_value(self, token, value):
if len(value.split()) > 0: if len(value.split()) > 0:
value = '"' + value + '"' value = '"' + value + '"'
if self.platform == 'openbsd':
# openbsd doesn't accept -w, but since it's not needed, just drop it
thiscmd = "%s %s=%s" % (self.sysctl_cmd, token, value)
else:
thiscmd = "%s -w %s=%s" % (self.sysctl_cmd, token, value) thiscmd = "%s -w %s=%s" % (self.sysctl_cmd, token, value)
rc,out,err = self.module.run_command(thiscmd) rc,out,err = self.module.run_command(thiscmd)
if rc != 0: if rc != 0:
@ -227,9 +237,20 @@ class SysctlModule(object):
# Run sysctl -p # Run sysctl -p
def reload_sysctl(self): def reload_sysctl(self):
# do it # do it
if get_platform().lower() == 'freebsd': if self.platform == 'freebsd':
# freebsd doesn't support -p, so reload the sysctl service # freebsd doesn't support -p, so reload the sysctl service
rc,out,err = self.module.run_command('/etc/rc.d/sysctl reload') rc,out,err = self.module.run_command('/etc/rc.d/sysctl reload')
elif self.platform == 'openbsd':
# openbsd doesn't support -p and doesn't have a sysctl service,
# so we have to set every value with its own sysctl call
for k, v in self.file_values.items():
rc = 0
if k != self.args['name']:
rc = self.set_token_value(k, v)
if rc != 0:
break
if rc == 0 and self.args['state'] == "present":
rc = self.set_token_value(self.args['name'], self.args['value'])
else: else:
# system supports reloading via the -p flag to sysctl, so we'll use that # system supports reloading via the -p flag to sysctl, so we'll use that
sysctl_args = [self.sysctl_cmd, '-p', self.sysctl_file] sysctl_args = [self.sysctl_cmd, '-p', self.sysctl_file]

@ -83,7 +83,7 @@ options:
description: description:
- Optionally set the user's password to this crypted value. See - Optionally set the user's password to this crypted value. See
the user example in the github examples directory for what this looks the user example in the github examples directory for what this looks
like in a playbook. See U(http://docs.ansible.com/faq.html#how-do-i-generate-crypted-passwords-for-the-user-module) like in a playbook. See U(http://docs.ansible.com/ansible/faq.html#how-do-i-generate-crypted-passwords-for-the-user-module)
for details on various ways to generate these password values. for details on various ways to generate these password values.
Note on Darwin system, this value has to be cleartext. Note on Darwin system, this value has to be cleartext.
Beware of security issues. Beware of security issues.
@ -212,7 +212,6 @@ EXAMPLES = '''
import os import os
import pwd import pwd
import grp import grp
import syslog
import platform import platform
import socket import socket
import time import time
@ -290,15 +289,8 @@ class User(object):
else: else:
self.ssh_file = os.path.join('.ssh', 'id_%s' % self.ssh_type) self.ssh_file = os.path.join('.ssh', 'id_%s' % self.ssh_type)
# select whether we dump additional debug info through syslog
self.syslogging = False
def execute_command(self, cmd, use_unsafe_shell=False, data=None): def execute_command(self, cmd, use_unsafe_shell=False, data=None):
if self.syslogging:
syslog.openlog('ansible-%s' % os.path.basename(__file__))
syslog.syslog(syslog.LOG_NOTICE, 'Command %s' % '|'.join(cmd))
return self.module.run_command(cmd, use_unsafe_shell=use_unsafe_shell, data=data) return self.module.run_command(cmd, use_unsafe_shell=use_unsafe_shell, data=data)
def remove_user_userdel(self): def remove_user_userdel(self):
@ -2079,11 +2071,9 @@ def main():
user = User(module) user = User(module)
if user.syslogging: module.debug('User instantiated - platform %s' % user.platform)
syslog.openlog('ansible-%s' % os.path.basename(__file__))
syslog.syslog(syslog.LOG_NOTICE, 'User instantiated - platform %s' % user.platform)
if user.distribution: if user.distribution:
syslog.syslog(syslog.LOG_NOTICE, 'User instantiated - distribution %s' % user.distribution) module.debug('User instantiated - distribution %s' % user.distribution)
rc = None rc = None
out = '' out = ''

@ -272,6 +272,7 @@ class LocalSocketThread(Thread):
pass pass
def terminate(self): def terminate(self):
super(LocalSocketThread, self).terminate()
self.terminated = True self.terminated = True
self.s.shutdown(socket.SHUT_RDWR) self.s.shutdown(socket.SHUT_RDWR)
self.s.close() self.s.close()
@ -311,7 +312,6 @@ class ThreadedTCPServer(SocketServer.ThreadingTCPServer):
SocketServer.ThreadingTCPServer.__init__(self, server_address, RequestHandlerClass) SocketServer.ThreadingTCPServer.__init__(self, server_address, RequestHandlerClass)
def shutdown(self): def shutdown(self):
self.local_thread.terminate()
self.running = False self.running = False
SocketServer.ThreadingTCPServer.shutdown(self) SocketServer.ThreadingTCPServer.shutdown(self)
@ -472,8 +472,6 @@ class ThreadedTCPRequestHandler(SocketServer.BaseRequestHandler):
def command(self, data): def command(self, data):
if 'cmd' not in data: if 'cmd' not in data:
return dict(failed=True, msg='internal error: cmd is required') return dict(failed=True, msg='internal error: cmd is required')
if 'tmp_path' not in data:
return dict(failed=True, msg='internal error: tmp_path is required')
vvvv("executing: %s" % data['cmd']) vvvv("executing: %s" % data['cmd'])
@ -601,15 +599,14 @@ def daemonize(module, password, port, timeout, minutes, use_ipv6, pid_file):
server.shutdown() server.shutdown()
else: else:
# reschedule the check # reschedule the check
vvvv("daemon idle for %d seconds (timeout=%d)" % (total_seconds,minutes*60)) signal.alarm(1)
signal.alarm(30)
except: except:
pass pass
finally: finally:
server.last_event_lock.release() server.last_event_lock.release()
signal.signal(signal.SIGALRM, timer_handler) signal.signal(signal.SIGALRM, timer_handler)
signal.alarm(30) signal.alarm(1)
tries = 5 tries = 5
while tries > 0: while tries > 0:

@ -122,7 +122,7 @@ def main():
if supervisorctl_path: if supervisorctl_path:
supervisorctl_path = os.path.expanduser(supervisorctl_path) supervisorctl_path = os.path.expanduser(supervisorctl_path)
if os.path.exists(supervisorctl_path) and module.is_executable(supervisorctl_path): if os.path.exists(supervisorctl_path) and is_executable(supervisorctl_path):
supervisorctl_args = [supervisorctl_path] supervisorctl_args = [supervisorctl_path]
else: else:
module.fail_json( module.fail_json(
@ -239,5 +239,6 @@ def main():
# import module snippets # import module snippets
from ansible.module_utils.basic import * from ansible.module_utils.basic import *
# is_executable from basic
main() if __name__ == '__main__':
main()

@ -64,6 +64,15 @@ Set-Attr $result.ansible_facts "ansible_os_name" ($win32_os.Name.Split('|')[0]).
Set-Attr $result.ansible_facts "ansible_distribution" $osversion.VersionString Set-Attr $result.ansible_facts "ansible_distribution" $osversion.VersionString
Set-Attr $result.ansible_facts "ansible_distribution_version" $osversion.Version.ToString() Set-Attr $result.ansible_facts "ansible_distribution_version" $osversion.Version.ToString()
$date = New-Object psobject
Set-Attr $date "date" (Get-Date -format d)
Set-Attr $date "year" (Get-Date -format yyyy)
Set-Attr $date "month" (Get-Date -format MM)
Set-Attr $date "day" (Get-Date -format dd)
Set-Attr $date "hour" (Get-Date -format HH)
Set-Attr $date "iso8601" (Get-Date -format s)
Set-Attr $result.ansible_facts "ansible_date_time" $date
Set-Attr $result.ansible_facts "ansible_totalmem" $capacity Set-Attr $result.ansible_facts "ansible_totalmem" $capacity
Set-Attr $result.ansible_facts "ansible_lastboot" $win32_os.lastbootuptime.ToString("u") Set-Attr $result.ansible_facts "ansible_lastboot" $win32_os.lastbootuptime.ToString("u")
@ -77,6 +86,10 @@ $psversion = $PSVersionTable.PSVersion.Major
Set-Attr $result.ansible_facts "ansible_powershell_version" $psversion Set-Attr $result.ansible_facts "ansible_powershell_version" $psversion
$winrm_https_listener_parent_path = Get-ChildItem -Path WSMan:\localhost\Listener -Recurse | Where-Object {$_.PSChildName -eq "Transport" -and $_.Value -eq "HTTPS"} | select PSParentPath $winrm_https_listener_parent_path = Get-ChildItem -Path WSMan:\localhost\Listener -Recurse | Where-Object {$_.PSChildName -eq "Transport" -and $_.Value -eq "HTTPS"} | select PSParentPath
$winrm_https_listener_path = $null
$https_listener = $null
$winrm_cert_thumbprint = $null
$uppercase_cert_thumbprint = $null
if ($winrm_https_listener_parent_path ) { if ($winrm_https_listener_parent_path ) {
$winrm_https_listener_path = $winrm_https_listener_parent_path.PSParentPath.Substring($winrm_https_listener_parent_path.PSParentPath.LastIndexOf("\")) $winrm_https_listener_path = $winrm_https_listener_parent_path.PSParentPath.Substring($winrm_https_listener_parent_path.PSParentPath.LastIndexOf("\"))

@ -71,18 +71,15 @@ If (Test-Path $path)
} }
Else Else
{ {
# Only files have the .Directory attribute. If ( $state -eq "directory" -and -not $fileinfo.PsIsContainer )
If ( $state -eq "directory" -and $fileinfo.Directory )
{ {
Fail-Json (New-Object psobject) "path is not a directory" Fail-Json (New-Object psobject) "path is not a directory"
} }
# Only files have the .Directory attribute. If ( $state -eq "file" -and $fileinfo.PsIsContainer )
If ( $state -eq "file" -and -not $fileinfo.Directory )
{ {
Fail-Json (New-Object psobject) "path is not a file" Fail-Json (New-Object psobject) "path is not a file"
} }
} }
} }
Else Else

@ -30,7 +30,7 @@ If ($params.url) {
$url = $params.url $url = $params.url
} }
Else { Else {
Fail-Json $result "mising required argument: url" Fail-Json $result "missing required argument: url"
} }
If ($params.dest) { If ($params.dest) {

@ -387,8 +387,11 @@ Elseif (Test-Path $dest) {
$found = $FALSE; $found = $FALSE;
Foreach ($encoding in $sortedlist.GetValueList()) { Foreach ($encoding in $sortedlist.GetValueList()) {
$preamble = $encoding.GetPreamble(); $preamble = $encoding.GetPreamble();
If ($preamble) { If ($preamble -and $bom) {
Foreach ($i in 0..$preamble.Length) { Foreach ($i in 0..($preamble.Length - 1)) {
If ($i -ge $bom.Length) {
break;
}
If ($preamble[$i] -ne $bom[$i]) { If ($preamble[$i] -ne $bom[$i]) {
break; break;
} }
@ -427,7 +430,7 @@ If ($state -eq "present") {
} }
Else { Else {
If ($regex -eq $FALSE -and $line -eq $FALSE) { If ($regexp -eq $FALSE -and $line -eq $FALSE) {
Fail-Json (New-Object psobject) "one of line= or regexp= is required with state=absent"; Fail-Json (New-Object psobject) "one of line= or regexp= is required with state=absent";
} }

Loading…
Cancel
Save