merge devel

reviewable/pr18780/r1
Phil 10 years ago
commit 3db78457ce

@ -0,0 +1,16 @@
sudo: false
language: python
python:
- "2.7"
addons:
apt:
sources:
- deadsnakes
packages:
- python2.4
- python2.6
script:
- python2.4 -m compileall -fq -x 'cloud/' .
- python2.4 -m compileall -fq cloud/amazon/_ec2_ami_search.py cloud/amazon/ec2_facts.py
- python2.6 -m compileall -fq .
- python2.7 -m compileall -fq .
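Review note: compileall simply byte-compiles every file it is handed and fails on syntax errors, so the script above doubles as a per-interpreter syntax check. A rough Python equivalent of one invocation (the exit-code handling is my assumption, not part of this config):

import compileall
import sys

# Byte-compile the tree, like `python -m compileall -fq .` above;
# compile_dir returns a truthy value only when every file compiled.
ok = compileall.compile_dir('.', quiet=1, force=True)
sys.exit(0 if ok else 1)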

@ -21,6 +21,7 @@ DOCUMENTATION = '''
---
module: ec2_ami_search
short_description: Retrieve AWS AMI information for a given operating system.
deprecated: "in favor of the ec2_ami_find module"
version_added: "1.6"
description:
- Look up the most recent AMI on AWS for a given operating system.
@ -194,7 +195,7 @@ def main():
# this is magic, see lib/ansible/module_common.py
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()

@ -54,6 +54,12 @@ options:
required: false
default: null
aliases: []
notification_arns:
description:
- The Simple Notification Service (SNS) topic ARNs to publish stack related events.
required: false
default: null
version_added: "2.0"
stack_policy:
description:
- the path of the cloudformation stack policy
@ -81,8 +87,14 @@ options:
- Location of file containing the template body. The URL must point to a template (max size 307,200 bytes) located in an S3 bucket in the same region as the stack. This parameter is mutually exclusive with 'template'. Either one of them is required if "state" parameter is "present"
required: false
version_added: "2.0"
template_format:
description: For local templates, allows specification of json or yaml format
default: json
choices: [ json, yaml ]
required: false
version_added: "2.0"
author: James S. Martin
author: "James S. Martin (@jsmartin)"
extends_documentation_fragment: aws
'''
@ -127,6 +139,7 @@ EXAMPLES = '''
import json
import time
import yaml
try:
import boto
@ -191,6 +204,11 @@ def stack_operation(cfn, stack_name, operation):
events = map(str, list(stack.describe_events())),
output = 'Stack %s failed' % operation)
break
elif '%s_ROLLBACK_FAILED' % operation == stack.stack_status:
result = dict(changed=True, failed=True,
events = map(str, list(stack.describe_events())),
output = 'Stack %s rollback failed' % operation)
break
else:
time.sleep(5)
return result
@ -216,9 +234,11 @@ def main():
template_parameters=dict(required=False, type='dict', default={}),
state=dict(default='present', choices=['present', 'absent']),
template=dict(default=None, required=False),
notification_arns=dict(default=None, required=False),
stack_policy=dict(default=None, required=False),
disable_rollback=dict(default=False, type='bool'),
template_url=dict(default=None, required=False),
template_format=dict(default='json', choices=['json', 'yaml'], required=False),
tags=dict(default=None)
)
)
@ -245,6 +265,14 @@ def main():
else:
template_body = None
if module.params['template_format'] == 'yaml':
if template_body is None:
module.fail_json(msg='yaml format only supported for local templates')
else:
template_body = json.dumps(yaml.load(template_body), indent=2)
notification_arns = module.params['notification_arns']
if module.params['stack_policy'] is not None:
stack_policy_body = open(module.params['stack_policy'], 'r').read()
else:
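Review note: a standalone sketch of the yaml-to-json conversion this hunk adds, assuming PyYAML is installed. yaml.load mirrors the module code, though yaml.safe_load would be the safer choice for untrusted templates:

import json
import yaml

# A tiny hypothetical CloudFormation template in YAML form.
yaml_body = "Resources:\n  Bucket:\n    Type: 'AWS::S3::Bucket'\n"
# Same transform as the module: parse the YAML, re-serialize as JSON.
json_body = json.dumps(yaml.load(yaml_body), indent=2)
print(json_body)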
@ -285,6 +313,7 @@ def main():
try:
cfn.create_stack(stack_name, parameters=template_parameters_tup,
template_body=template_body,
notification_arns=notification_arns,
stack_policy_body=stack_policy_body,
template_url=template_url,
disable_rollback=disable_rollback,
@ -307,6 +336,7 @@ def main():
try:
cfn.update_stack(stack_name, parameters=template_parameters_tup,
template_body=template_body,
notification_arns=notification_arns,
stack_policy_body=stack_policy_body,
disable_rollback=disable_rollback,
template_url=template_url,
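Review note: for anyone tracing the new parameter, this is roughly the boto call it lands in; the region, stack name and topic ARN below are placeholders:

import boto.cloudformation

cfn = boto.cloudformation.connect_to_region('us-east-1')
template_body = '{"Resources": {}}'  # hypothetical minimal template
# notification_arns takes a list of SNS topic ARNs, as wired in above.
cfn.create_stack('mystack', template_body=template_body,
                 notification_arns=['arn:aws:sns:us-east-1:123456789012:mytopic'])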

@ -44,7 +44,7 @@ options:
region:
version_added: "1.2"
description:
- The AWS region to use. Must be specified if ec2_url is not used. If not specified then the value of the EC2_REGION environment variable, if any, is used.
- The AWS region to use. Must be specified if ec2_url is not used. If not specified then the value of the EC2_REGION environment variable, if any, is used. See U(http://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region)
required: false
default: null
aliases: [ 'aws_region', 'ec2_region' ]
@ -57,16 +57,17 @@ options:
aliases: [ 'aws_zone', 'ec2_zone' ]
instance_type:
description:
- instance type to use for the instance
- instance type to use for the instance, see U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html)
required: true
default: null
aliases: []
tenancy:
version_added: "1.9"
description:
- An instance with a tenancy of "dedicated" runs on single-tenant hardware and can only be launched into a VPC. Valid values are "default" or "dedicated". Note that to use dedicated tenancy you MUST specify a vpc_subnet_id as well. Dedicated tenancy is not available for EC2 "micro" instances.
- An instance with a tenancy of "dedicated" runs on single-tenant hardware and can only be launched into a VPC. Note that to use dedicated tenancy you MUST specify a vpc_subnet_id as well. Dedicated tenancy is not available for EC2 "micro" instances.
required: false
default: default
choices: [ "default", "dedicated" ]
aliases: []
spot_price:
version_added: "1.5"
@ -75,6 +76,14 @@ options:
required: false
default: null
aliases: []
spot_type:
version_added: "2.0"
description:
- Type of spot request; one of "one-time" or "persistent". Defaults to "one-time" if not supplied.
required: false
default: "one-time"
choices: [ "one-time", "persistent" ]
aliases: []
image:
description:
- I(ami) ID to use for the instance
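Review note: the new spot_type option maps onto boto's `type` argument for spot requests. A rough sketch, with hypothetical region, price and AMI:

import boto.ec2

conn = boto.ec2.connect_to_region('us-east-1')
# 'one-time' requests end once fulfilled or cancelled; 'persistent'
# requests are re-submitted after an interruption.
reqs = conn.request_spot_instances(price='0.05', image_id='ami-12345678',
                                   count=1, type='persistent',
                                   instance_type='m3.medium')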
@ -123,6 +132,7 @@ options:
- enable detailed monitoring (CloudWatch) for instance
required: false
default: null
choices: [ "yes", "no" ]
aliases: []
user_data:
version_added: "0.9"
@ -158,6 +168,7 @@ options:
- when provisioning within vpc, assign a public IP address. Boto library must be 2.13.0+
required: false
default: null
choices: [ "yes", "no" ]
aliases: []
private_ip:
version_added: "1.2"
@ -185,7 +196,15 @@ options:
description:
- Enable or Disable the Source/Destination checks (for NAT instances and Virtual Routers)
required: false
default: true
default: yes
choices: [ "yes", "no" ]
termination_protection:
version_added: "2.0"
description:
- Enable or Disable the Termination Protection
required: false
default: no
choices: [ "yes", "no" ]
state:
version_added: "1.3"
description:
@ -197,7 +216,7 @@ options:
volumes:
version_added: "1.5"
description:
- a list of volume dicts, each containing device name and optionally ephemeral id or snapshot id. Size and type (and number of iops for io device type) must be specified for a new volume or a root volume, and may be passed for a snapshot volume. For any volume, a volume size less than 1 will be interpreted as a request not to create the volume.
- "a list of volume dicts, each containing device name and optionally ephemeral id or snapshot id. Size and type (and number of iops for io device type) must be specified for a new volume or a root volume, and may be passed for a snapshot volume. For any volume, a volume size less than 1 will be interpreted as a request not to create the volume. Encrypt the volume by passing 'encrypted: true' in the volume dict."
required: false
default: null
aliases: []
@ -222,7 +241,10 @@ options:
default: null
aliases: []
author: Seth Vidal, Tim Gerla, Lester Wade
author:
- "Tim Gerla (@tgerla)"
- "Lester Wade (@lwade)"
- "Seth Vidal"
extends_documentation_fragment: aws
'''
@ -605,6 +627,19 @@ def get_instance_info(inst):
except AttributeError:
instance_info['ebs_optimized'] = False
try:
bdm_dict = {}
bdm = getattr(inst, 'block_device_mapping')
for device_name in bdm.keys():
bdm_dict[device_name] = {
'status': bdm[device_name].status,
'volume_id': bdm[device_name].volume_id,
'delete_on_termination': bdm[device_name].delete_on_termination
}
instance_info['block_device_mapping'] = bdm_dict
except AttributeError:
instance_info['block_device_mapping'] = False
try:
instance_info['tenancy'] = getattr(inst, 'placement_tenancy')
except AttributeError:
@ -665,7 +700,8 @@ def create_block_device(module, ec2, volume):
size=volume.get('volume_size'),
volume_type=volume.get('device_type'),
delete_on_termination=volume.get('delete_on_termination', False),
iops=volume.get('iops'))
iops=volume.get('iops'),
encrypted=volume.get('encrypted', False))
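Review note: the encrypted flag flows into boto's block device type. A minimal sketch, assuming a boto version recent enough to accept `encrypted`:

from boto.ec2.blockdevicemapping import BlockDeviceMapping, EBSBlockDeviceType

bdm = BlockDeviceMapping()
# Hypothetical 100 GiB encrypted gp2 volume on /dev/sdb.
bdm['/dev/sdb'] = EBSBlockDeviceType(size=100, volume_type='gp2',
                                     delete_on_termination=True,
                                     encrypted=True)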
def boto_supports_param_in_spot_request(ec2, param):
"""
@ -755,6 +791,7 @@ def create_instances(module, ec2, vpc, override_count=None):
instance_type = module.params.get('instance_type')
tenancy = module.params.get('tenancy')
spot_price = module.params.get('spot_price')
spot_type = module.params.get('spot_type')
image = module.params.get('image')
if override_count:
count = override_count
@ -778,6 +815,7 @@ def create_instances(module, ec2, vpc, override_count=None):
exact_count = module.params.get('exact_count')
count_tag = module.params.get('count_tag')
source_dest_check = module.boolean(module.params.get('source_dest_check'))
termination_protection = module.boolean(module.params.get('termination_protection'))
# group_id and group_name are exclusive of each other
if group_id and group_name:
@ -939,7 +977,7 @@ def create_instances(module, ec2, vpc, override_count=None):
if private_ip:
module.fail_json(
msg='private_ip only available with on-demand (non-spot) instances')
if boto_supports_param_in_spot_request(ec2, placement_group):
if boto_supports_param_in_spot_request(ec2, 'placement_group'):
params['placement_group'] = placement_group
elif placement_group :
module.fail_json(
@ -947,6 +985,7 @@ def create_instances(module, ec2, vpc, override_count=None):
params.update(dict(
count = count_remaining,
type = spot_type,
))
res = ec2.request_spot_instances(spot_price, **params)
@ -1006,11 +1045,16 @@ def create_instances(module, ec2, vpc, override_count=None):
for res in res_list:
running_instances.extend(res.instances)
# Enabled by default by Amazon
if not source_dest_check:
# Enabled by default by AWS
if source_dest_check is False:
for inst in res.instances:
inst.modify_attribute('sourceDestCheck', False)
# Disabled by default by AWS
if termination_protection is True:
for inst in res.instances:
inst.modify_attribute('disableApiTermination', True)
# Leave this as late as possible to try and avoid InvalidInstanceID.NotFound
if instance_tags:
try:
@ -1021,6 +1065,7 @@ def create_instances(module, ec2, vpc, override_count=None):
instance_dict_array = []
created_instance_ids = []
for inst in running_instances:
inst.update()
d = get_instance_info(inst)
created_instance_ids.append(inst.id)
instance_dict_array.append(d)
@ -1089,6 +1134,13 @@ def terminate_instances(module, ec2, instance_ids):
# waiting took too long
if wait_timeout < time.time() and num_terminated < len(terminated_instance_ids):
module.fail_json(msg = "wait for instance termination timeout on %s" % time.asctime())
# Let's get the current state of the instances after terminating - issue600
instance_dict_array = []
for res in ec2.get_all_instances(instance_ids=terminated_instance_ids,\
filters={'instance-state-name':'terminated'}):
for inst in res.instances:
instance_dict_array.append(get_instance_info(inst))
return (changed, instance_dict_array, terminated_instance_ids)
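Review note: a sketch of the post-termination lookup this hunk adds; the region and instance id are placeholders:

import boto.ec2

ec2 = boto.ec2.connect_to_region('us-east-1')
# get_all_instances returns reservations; filter to the terminated
# state exactly as the new code does.
for res in ec2.get_all_instances(instance_ids=['i-0123abcd'],
                                 filters={'instance-state-name': 'terminated'}):
    for inst in res.instances:
        print('%s %s' % (inst.id, inst.state))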
@ -1119,21 +1171,32 @@ def startstop_instances(module, ec2, instance_ids, state):
if not isinstance(instance_ids, list) or len(instance_ids) < 1:
module.fail_json(msg='instance_ids should be a list of instances, aborting')
# Check that our instances are not in the state we want to take them to
# and change them to our desired state
# Check (and if necessary change) instance attributes and instance state
running_instances_array = []
for res in ec2.get_all_instances(instance_ids):
for inst in res.instances:
if inst.state != state:
instance_dict_array.append(get_instance_info(inst))
try:
if state == 'running':
inst.start()
else:
inst.stop()
except EC2ResponseError, e:
module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(inst.id, e))
changed = True
# Check "source_dest_check" attribute
if inst.get_attribute('sourceDestCheck')['sourceDestCheck'] != source_dest_check:
inst.modify_attribute('sourceDestCheck', source_dest_check)
changed = True
# Check "termination_protection" attribute
if inst.get_attribute('disableApiTermination')['disableApiTermination'] != termination_protection:
inst.modify_attribute('disableApiTermination', termination_protection)
changed = True
# Check instance state
if inst.state != state:
instance_dict_array.append(get_instance_info(inst))
try:
if state == 'running':
inst.start()
else:
inst.stop()
except EC2ResponseError, e:
module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(inst.id, e))
changed = True
## Wait for all the instances to finish starting or stopping
wait_timeout = time.time() + wait_timeout
@ -1167,6 +1230,7 @@ def main():
zone = dict(aliases=['aws_zone', 'ec2_zone']),
instance_type = dict(aliases=['type']),
spot_price = dict(),
spot_type = dict(default='one-time', choices=["one-time", "persistent"]),
image = dict(),
kernel = dict(),
count = dict(type='int', default='1'),
@ -1184,7 +1248,8 @@ def main():
instance_profile_name = dict(),
instance_ids = dict(type='list', aliases=['instance_id']),
source_dest_check = dict(type='bool', default=True),
state = dict(default='present'),
termination_protection = dict(type='bool', default=False),
state = dict(default='present', choices=['present', 'absent', 'running', 'stopped']),
exact_count = dict(type='int', default=None),
count_tag = dict(),
volumes = dict(type='list'),
@ -1207,15 +1272,11 @@ def main():
ec2 = ec2_connect(module)
ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module)
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
if region:
try:
vpc = boto.vpc.connect_to_region(
region,
aws_access_key_id=aws_access_key,
aws_secret_access_key=aws_secret_key
)
vpc = boto.vpc.connect_to_region(region, **aws_connect_kwargs)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg = str(e))
else:

@ -20,38 +20,33 @@ module: ec2_ami
version_added: "1.3"
short_description: create or destroy an image in ec2
description:
- Creates or deletes ec2 images.
- Creates or deletes ec2 images.
options:
instance_id:
description:
- instance id of the image to create
required: false
default: null
aliases: []
name:
description:
- The name of the new image to create
required: false
default: null
aliases: []
wait:
description:
- wait for the AMI to be in state 'available' before returning.
required: false
default: "no"
choices: [ "yes", "no" ]
aliases: []
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 300
aliases: []
state:
description:
- create or deregister/delete image
required: false
default: 'present'
aliases: []
region:
description:
- The AWS region to use. Must be specified if ec2_url is not used. If not specified then the value of the EC2_REGION environment variable, if any, is used.
@ -63,28 +58,36 @@ options:
- An optional human-readable string describing the contents and purpose of the AMI.
required: false
default: null
aliases: []
no_reboot:
description:
- An optional flag indicating that the bundling process should not attempt to shut down the instance before bundling. If this flag is True, the responsibility of maintaining file system integrity is left to the owner of the instance. The default choice is "no".
required: false
default: no
choices: [ "yes", "no" ]
aliases: []
image_id:
description:
- Image ID to be deregistered.
required: false
default: null
aliases: []
device_mapping:
version_added: "2.0"
description:
- An optional list of devices with custom configurations (same block-device-mapping parameters)
required: false
default: null
delete_snapshot:
description:
- Whether or not to delete an AMI while deregistering it.
required: false
default: null
aliases: []
tags:
description:
- a hash/dictionary of tags to add to the new image; '{"key":"value"}' and '{"key":"value","key":"value"}'
required: false
default: null
version_added: "2.0"
author: Evan Duffield <eduffield@iacquire.com>
author: "Evan Duffield (@scicoin-project) <eduffield@iacquire.com>"
extends_documentation_fragment: aws
'''
@ -98,6 +101,9 @@ EXAMPLES = '''
instance_id: i-xxxxxx
wait: yes
name: newtest
tags:
Name: newtest
Service: TestService
register: instance
# Basic AMI Creation, without waiting
@ -110,6 +116,23 @@ EXAMPLES = '''
name: newtest
register: instance
# AMI Creation, with a custom root-device size and another EBS attached
- ec2_ami:
aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx
aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
instance_id: i-xxxxxx
name: newtest
device_mapping:
- device_name: /dev/sda1
size: XXX
delete_on_termination: true
volume_type: gp2
- device_name: /dev/sdb
size: YYY
delete_on_termination: false
volume_type: gp2
register: instance
# Deregister/Delete AMI
- ec2_ami:
aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx
@ -136,6 +159,7 @@ import time
try:
import boto
import boto.ec2
from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
@ -155,6 +179,8 @@ def create_image(module, ec2):
wait_timeout = int(module.params.get('wait_timeout'))
description = module.params.get('description')
no_reboot = module.params.get('no_reboot')
device_mapping = module.params.get('device_mapping')
tags = module.params.get('tags')
try:
params = {'instance_id': instance_id,
@ -162,9 +188,29 @@ def create_image(module, ec2):
'description': description,
'no_reboot': no_reboot}
if device_mapping:
bdm = BlockDeviceMapping()
for device in device_mapping:
if 'device_name' not in device:
module.fail_json(msg = 'Device name must be set for volume')
device_name = device['device_name']
del device['device_name']
bd = BlockDeviceType(**device)
bdm[device_name] = bd
params['block_device_mapping'] = bdm
image_id = ec2.create_image(**params)
except boto.exception.BotoServerError, e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
if e.error_code == 'InvalidAMIName.Duplicate':
images = ec2.get_all_images()
for img in images:
if img.name == name:
module.exit_json(msg="AMI name already present", image_id=img.id, state=img.state, changed=False)
sys.exit(0)
else:
module.fail_json(msg="Error in retrieving duplicate AMI details")
else:
module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
# Wait until the image is recognized. EC2 API has eventual consistency,
# such that a successful CreateImage API call doesn't guarantee the success
@ -190,6 +236,12 @@ def create_image(module, ec2):
# waiting took too long
module.fail_json(msg = "timed out waiting for image to be created")
if tags:
try:
ec2.create_tags(image_id, tags)
except boto.exception.EC2ResponseError, e:
module.fail_json(msg = "Image tagging failed => %s: %s" % (e.error_code, e.error_message))
module.exit_json(msg="AMI creation operation complete", image_id=image_id, state=img.state, changed=True)
@ -241,6 +293,8 @@ def main():
description = dict(default=""),
no_reboot = dict(default=False, type="bool"),
state = dict(default='present'),
device_mapping = dict(type='list'),
tags = dict(type='dict')
)
)
module = AnsibleModule(argument_spec=argument_spec)
@ -273,4 +327,3 @@ from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()

@ -0,0 +1,302 @@
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_ami_find
version_added: 2.0
short_description: Searches for AMIs to obtain the AMI ID and other information
description:
- Returns list of matching AMIs with AMI ID, along with other useful information
- Can search AMIs with different owners
- Can search by matching tag(s), by AMI name and/or other criteria
- Results can be sorted and sliced
author: "Tom Bamford (@tombamford)"
notes:
- This module is not backwards compatible with the previous version of the ec2_search_ami module which worked only for Ubuntu AMIs listed on cloud-images.ubuntu.com.
- See the example below for a suggestion of how to search by distro/release.
options:
region:
description:
- The AWS region to use.
required: true
aliases: [ 'aws_region', 'ec2_region' ]
owner:
description:
- Search AMIs owned by the specified owner
- Can specify an AWS account ID, or one of the special IDs 'self', 'amazon' or 'aws-marketplace'
- If not specified, all EC2 AMIs in the specified region will be searched.
- You can include wildcards in many of the search options. An asterisk (*) matches zero or more characters, and a question mark (?) matches exactly one character. You can escape special characters using a backslash (\) before the character. For example, a value of \*amazon\?\\ searches for the literal string *amazon?\.
required: false
default: null
ami_id:
description:
- An AMI ID to match.
default: null
required: false
ami_tags:
description:
- A hash/dictionary of tags to match for the AMI.
default: null
required: false
architecture:
description:
- An architecture type to match (e.g. x86_64).
default: null
required: false
hypervisor:
description:
- A hypervisor type to match (e.g. xen).
default: null
required: false
is_public:
description:
- Whether or not the image(s) are public.
choices: ['yes', 'no']
default: null
required: false
name:
description:
- An AMI name to match.
default: null
required: false
platform:
description:
- Platform type to match.
default: null
required: false
sort:
description:
- Optional attribute with which to sort the results.
- If specifying 'tag', the 'tag_name' parameter is required.
choices: ['name', 'description', 'tag']
default: null
required: false
sort_tag:
description:
- Tag name with which to sort results.
- Required when specifying 'sort=tag'.
default: null
required: false
sort_order:
description:
- Order in which to sort results.
- Only used when the 'sort' parameter is specified.
choices: ['ascending', 'descending']
default: 'ascending'
required: false
sort_start:
description:
- Which result to start with (when sorting).
- Corresponds to Python slice notation.
default: null
required: false
sort_end:
description:
- Which result to end with (when sorting).
- Corresponds to Python slice notation.
default: null
required: false
state:
description:
- AMI state to match.
default: 'available'
required: false
virtualization_type:
description:
- Virtualization type to match (e.g. hvm).
default: null
required: false
no_result_action:
description:
- What to do when no results are found.
- "'success' reports success and returns an empty array"
- "'fail' causes the module to report failure"
choices: ['success', 'fail']
default: 'success'
required: false
requirements:
- "python >= 2.6"
- boto
'''
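Review note: sort_start/sort_end are documented as Python slice notation, and that is literally how they behave on the sorted result list:

# Hypothetical result ids, already sorted descending by name.
results = ['ami-d', 'ami-c', 'ami-b', 'ami-a']
print(results[0:1])   # sort_start=0, sort_end=1 -> newest match only
print(results[1:3])   # sort_start=1, sort_end=3 -> ['ami-c', 'ami-b']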
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Search for the AMI tagged "project:website"
- ec2_ami_find:
owner: self
ami_tags:
project: website
no_result_action: fail
register: ami_find
# Search for the latest Ubuntu 14.04 AMI
- ec2_ami_find:
name: "ubuntu/images/ebs/ubuntu-trusty-14.04-amd64-server-*"
owner: 099720109477
sort: name
sort_order: descending
sort_end: 1
register: ami_find
# Launch an EC2 instance
- ec2:
image: "{{ ami_find.results[0].ami_id }}"
instance_type: m3.medium
key_name: mykey
wait: yes
'''
try:
import boto.ec2
HAS_BOTO=True
except ImportError:
HAS_BOTO=False
import json
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
region = dict(required=True,
aliases = ['aws_region', 'ec2_region']),
owner = dict(required=False, default=None),
ami_id = dict(required=False),
ami_tags = dict(required=False, type='dict',
aliases = ['search_tags', 'image_tags']),
architecture = dict(required=False),
hypervisor = dict(required=False),
is_public = dict(required=False),
name = dict(required=False),
platform = dict(required=False),
sort = dict(required=False, default=None,
choices=['name', 'description', 'tag']),
sort_tag = dict(required=False),
sort_order = dict(required=False, default='ascending',
choices=['ascending', 'descending']),
sort_start = dict(required=False),
sort_end = dict(required=False),
state = dict(required=False, default='available'),
virtualization_type = dict(required=False),
no_result_action = dict(required=False, default='success',
choices = ['success', 'fail']),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module, install via pip or your package manager')
ami_id = module.params.get('ami_id')
ami_tags = module.params.get('ami_tags')
architecture = module.params.get('architecture')
hypervisor = module.params.get('hypervisor')
is_public = module.params.get('is_public')
name = module.params.get('name')
owner = module.params.get('owner')
platform = module.params.get('platform')
sort = module.params.get('sort')
sort_tag = module.params.get('sort_tag')
sort_order = module.params.get('sort_order')
sort_start = module.params.get('sort_start')
sort_end = module.params.get('sort_end')
state = module.params.get('state')
virtualization_type = module.params.get('virtualization_type')
no_result_action = module.params.get('no_result_action')
filter = {'state': state}
if ami_id:
filter['image_id'] = ami_id
if ami_tags:
for tag in ami_tags:
filter['tag:'+tag] = ami_tags[tag]
if architecture:
filter['architecture'] = architecture
if hypervisor:
filter['hypervisor'] = hypervisor
if is_public:
filter['is_public'] = is_public
if name:
filter['name'] = name
if platform:
filter['platform'] = platform
if virtualization_type:
filter['virtualization_type'] = virtualization_type
ec2 = ec2_connect(module)
images_result = ec2.get_all_images(owners=owner, filters=filter)
if no_result_action == 'fail' and len(images_result) == 0:
module.fail_json(msg="No AMIs matched the attributes: %s" % json.dumps(filter))
results = []
for image in images_result:
data = {
'ami_id': image.id,
'architecture': image.architecture,
'description': image.description,
'is_public': image.is_public,
'name': image.name,
'owner_id': image.owner_id,
'platform': image.platform,
'root_device_name': image.root_device_name,
'root_device_type': image.root_device_type,
'state': image.state,
'tags': image.tags,
'virtualization_type': image.virtualization_type,
}
if image.kernel_id:
data['kernel_id'] = image.kernel_id
if image.ramdisk_id:
data['ramdisk_id'] = image.ramdisk_id
results.append(data)
if sort == 'tag':
if not sort_tag:
module.fail_json(msg="'sort_tag' option must be given with 'sort=tag'")
results.sort(key=lambda e: e['tags'][sort_tag], reverse=(sort_order=='descending'))
elif sort:
results.sort(key=lambda e: e[sort], reverse=(sort_order=='descending'))
try:
if sort and sort_start and sort_end:
results = results[int(sort_start):int(sort_end)]
elif sort and sort_start:
results = results[int(sort_start):]
elif sort and sort_end:
results = results[:int(sort_end)]
except TypeError:
module.fail_json(msg="Please supply numeric values for sort_start and/or sort_end")
module.exit_json(results=results)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()

@ -21,7 +21,7 @@ description:
- Can create or delete AWS Autoscaling Groups
- Works with the ec2_lc module to manage Launch Configurations
version_added: "1.6"
author: Gareth Rushgrove
author: "Gareth Rushgrove (@garethr)"
options:
state:
description:
@ -58,7 +58,7 @@ options:
required: false
replace_all_instances:
description:
- In a rolling fashion, replace all instances with an old launch configuration with one from the current launch configuraiton.
- In a rolling fashion, replace all instances with an old launch configuration with one from the current launch configuration.
required: false
version_added: "1.8"
default: False
@ -190,9 +190,13 @@ to "replace_instances":
'''
import time
import logging as log
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
log.getLogger('boto').setLevel(log.CRITICAL)
#log.basicConfig(filename='/tmp/ansible_ec2_asg.log',level=log.DEBUG, format='%(asctime)s: %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
try:
import boto.ec2.autoscale
@ -265,8 +269,71 @@ def get_properties(autoscaling_group):
if getattr(autoscaling_group, "tags", None):
properties['tags'] = dict((t.key, t.value) for t in autoscaling_group.tags)
return properties
def elb_dreg(asg_connection, module, group_name, instance_id):
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
as_group = asg_connection.get_all_groups(names=[group_name])[0]
wait_timeout = module.params.get('wait_timeout')
props = get_properties(as_group)
count = 1
if as_group.load_balancers and as_group.health_check_type == 'ELB':
try:
elb_connection = connect_to_aws(boto.ec2.elb, region, **aws_connect_params)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg=str(e))
else:
return
exists = True
for lb in as_group.load_balancers:
elb_connection.deregister_instances(lb, instance_id)
log.debug("De-registering {0} from ELB {1}".format(instance_id, lb))
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time() and count > 0:
count = 0
for lb in as_group.load_balancers:
lb_instances = elb_connection.describe_instance_health(lb)
for i in lb_instances:
if i.instance_id == instance_id and i.state == "InService":
count += 1
log.debug("{0}: {1}, {2}".format(i.instance_id, i.state, i.description))
time.sleep(10)
if wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg = "Waited too long for instance to deregister. {0}".format(time.asctime()))
def elb_healthy(asg_connection, elb_connection, module, group_name):
healthy_instances = []
as_group = asg_connection.get_all_groups(names=[group_name])[0]
props = get_properties(as_group)
# get healthy, inservice instances from ASG
instances = []
for instance, settings in props['instance_facts'].items():
if settings['lifecycle_state'] == 'InService' and settings['health_status'] == 'Healthy':
instances.append(instance)
log.debug("ASG considers the following instances InService and Healthy: {0}".format(instances))
log.debug("ELB instance status:")
for lb in as_group.load_balancers:
# we catch a race condition that sometimes happens if the instance exists in the ASG
# but has not yet shown up in the ELB
try:
lb_instances = elb_connection.describe_instance_health(lb, instances=instances)
except boto.exception.InvalidInstance, e:
pass
for i in lb_instances:
if i.state == "InService":
healthy_instances.append(i.instance_id)
log.debug("{0}: {1}".format(i.instance_id, i.state))
return len(healthy_instances)
def wait_for_elb(asg_connection, module, group_name):
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
@ -277,36 +344,23 @@ def wait_for_elb(asg_connection, module, group_name):
as_group = asg_connection.get_all_groups(names=[group_name])[0]
if as_group.load_balancers and as_group.health_check_type == 'ELB':
log.debug("Waiting for ELB to consider intances healthy.")
try:
elb_connection = connect_to_aws(boto.ec2.elb, region, **aws_connect_params)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg=str(e))
wait_timeout = time.time() + wait_timeout
healthy_instances = {}
healthy_instances = elb_healthy(asg_connection, elb_connection, module, group_name)
while len(healthy_instances.keys()) < as_group.min_size and wait_timeout > time.time():
as_group = asg_connection.get_all_groups(names=[group_name])[0]
props = get_properties(as_group)
# get healthy, inservice instances from ASG
instances = []
for instance, settings in props['instance_facts'].items():
if settings['lifecycle_state'] == 'InService' and settings['health_status'] == 'Healthy':
instances.append(instance)
for lb in as_group.load_balancers:
# we catch a race condition that sometimes happens if the instance exists in the ASG
# but has not yet shown up in the ELB
try:
lb_instances = elb_connection.describe_instance_health(lb, instances=instances)
except boto.exception.InvalidInstance, e:
pass
for i in lb_instances:
if i.state == "InService":
healthy_instances[i.instance_id] = i.state
while healthy_instances < as_group.min_size and wait_timeout > time.time():
healthy_instances = elb_healthy(asg_connection, elb_connection, module, group_name)
log.debug("ELB thinks {0} instances are healthy.".format(healthy_instances))
time.sleep(10)
if wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg = "Waited too long for ELB instances to be healthy. %s" % time.asctime())
log.debug("Waiting complete. ELB thinks {0} instances are healthy.".format(healthy_instances))
def create_autoscaling_group(connection, module):
group_name = module.params.get('name')
@ -364,7 +418,7 @@ def create_autoscaling_group(connection, module):
try:
connection.create_auto_scaling_group(ag)
if wait_for_instances == True:
wait_for_new_instances(module, connection, group_name, wait_timeout, desired_capacity, 'viable_instances')
wait_for_new_inst(module, connection, group_name, wait_timeout, desired_capacity, 'viable_instances')
wait_for_elb(connection, module, group_name)
as_group = connection.get_all_groups(names=[group_name])[0]
asg_properties = get_properties(as_group)
@ -430,7 +484,7 @@ def create_autoscaling_group(connection, module):
module.fail_json(msg=str(e))
if wait_for_instances == True:
wait_for_new_instances(module, connection, group_name, wait_timeout, desired_capacity, 'viable_instances')
wait_for_new_inst(module, connection, group_name, wait_timeout, desired_capacity, 'viable_instances')
wait_for_elb(connection, module, group_name)
try:
as_group = connection.get_all_groups(names=[group_name])[0]
@ -471,6 +525,15 @@ def get_chunks(l, n):
for i in xrange(0, len(l), n):
yield l[i:i+n]
def update_size(group, max_size, min_size, dc):
log.debug("setting ASG sizes")
log.debug("minimum size: {0}, desired_capacity: {1}, max size: {2}".format(min_size, dc, max_size ))
group.max_size = max_size
group.min_size = min_size
group.desired_capacity = dc
group.update()
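Review note: a quick usage sketch of the helpers above, with hypothetical instance ids; get_chunks drives the batching in replace() below:

ids = ['i-1', 'i-2', 'i-3', 'i-4', 'i-5']
for batch in get_chunks(ids, 2):
    print(batch)   # ['i-1', 'i-2'], then ['i-3', 'i-4'], then ['i-5']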
def replace(connection, module):
batch_size = module.params.get('replace_batch_size')
wait_timeout = module.params.get('wait_timeout')
@ -478,91 +541,191 @@ def replace(connection, module):
max_size = module.params.get('max_size')
min_size = module.params.get('min_size')
desired_capacity = module.params.get('desired_capacity')
# FIXME: we need some more docs about this feature
lc_check = module.params.get('lc_check')
replace_instances = module.params.get('replace_instances')
as_group = connection.get_all_groups(names=[group_name])[0]
wait_for_new_instances(module, connection, group_name, wait_timeout, as_group.min_size, 'viable_instances')
wait_for_new_inst(module, connection, group_name, wait_timeout, as_group.min_size, 'viable_instances')
props = get_properties(as_group)
instances = props['instances']
replaceable = 0
if replace_instances:
instances = replace_instances
for k in props['instance_facts'].keys():
if k in instances:
if props['instance_facts'][k]['launch_config_name'] != props['launch_config_name']:
replaceable += 1
if replaceable == 0:
# check to see if instances are replaceable if checking launch configs
new_instances, old_instances = get_instances_by_lc(props, lc_check, instances)
num_new_inst_needed = desired_capacity - len(new_instances)
if lc_check:
if num_new_inst_needed == 0 and old_instances:
log.debug("No new instances needed, but old instances are present. Removing old instances")
terminate_batch(connection, module, old_instances, instances, True)
as_group = connection.get_all_groups(names=[group_name])[0]
props = get_properties(as_group)
changed = True
return(changed, props)
# we don't want to spin up extra instances if not necessary
if num_new_inst_needed < batch_size:
log.debug("Overriding batch size to {0}".format(num_new_inst_needed))
batch_size = num_new_inst_needed
if not old_instances:
changed = False
return(changed, props)
# set temporary settings and wait for them to be reached
# This should get overridden if the number of instances left is less than the batch size.
as_group = connection.get_all_groups(names=[group_name])[0]
as_group.max_size = max_size + batch_size
as_group.min_size = min_size + batch_size
as_group.desired_capacity = desired_capacity + batch_size
as_group.update()
wait_for_new_instances(module, connection, group_name, wait_timeout, as_group.min_size, 'viable_instances')
update_size(as_group, max_size + batch_size, min_size + batch_size, desired_capacity + batch_size)
wait_for_new_inst(module, connection, group_name, wait_timeout, as_group.min_size, 'viable_instances')
wait_for_elb(connection, module, group_name)
as_group = connection.get_all_groups(names=[group_name])[0]
props = get_properties(as_group)
instances = props['instances']
if replace_instances:
instances = replace_instances
log.debug("beginning main loop")
for i in get_chunks(instances, batch_size):
terminate_batch(connection, module, i)
wait_for_new_instances(module, connection, group_name, wait_timeout, as_group.min_size, 'viable_instances')
# break out of this loop if we have enough new instances
break_early, desired_size, term_instances = terminate_batch(connection, module, i, instances, False)
wait_for_term_inst(connection, module, term_instances)
wait_for_new_inst(module, connection, group_name, wait_timeout, desired_size, 'viable_instances')
wait_for_elb(connection, module, group_name)
as_group = connection.get_all_groups(names=[group_name])[0]
# return settings to normal
as_group.max_size = max_size
as_group.min_size = min_size
as_group.desired_capacity = desired_capacity
as_group.update()
if break_early:
log.debug("breaking loop")
break
update_size(as_group, max_size, min_size, desired_capacity)
as_group = connection.get_all_groups(names=[group_name])[0]
asg_properties = get_properties(as_group)
log.debug("Rolling update complete.")
changed=True
return(changed, asg_properties)
def terminate_batch(connection, module, replace_instances):
group_name = module.params.get('name')
wait_timeout = int(module.params.get('wait_timeout'))
lc_check = module.params.get('lc_check')
as_group = connection.get_all_groups(names=[group_name])[0]
props = get_properties(as_group)
def get_instances_by_lc(props, lc_check, initial_instances):
# check to make sure instances given are actually in the given ASG
# and they have a non-current launch config
new_instances = []
old_instances = []
# old instances are those that have the old launch config
if lc_check:
for i in props['instances']:
if props['instance_facts'][i]['launch_config_name'] == props['launch_config_name']:
new_instances.append(i)
else:
old_instances.append(i)
else:
log.debug("Comparing initial instances with current: {0}".format(initial_instances))
for i in props['instances']:
if i not in initial_instances:
new_instances.append(i)
else:
old_instances.append(i)
log.debug("New instances: {0}, {1}".format(len(new_instances), new_instances))
log.debug("Old instances: {0}, {1}".format(len(old_instances), old_instances))
return new_instances, old_instances
def list_purgeable_instances(props, lc_check, replace_instances, initial_instances):
instances_to_terminate = []
instances = ( inst_id for inst_id in replace_instances if inst_id in props['instances'])
# check to make sure instances given are actually in the given ASG
# and they have a non-current launch config
if lc_check:
for i in instances:
if props['instance_facts'][i]['launch_config_name'] != props['launch_config_name']:
old_instances.append(i)
instances_to_terminate.append(i)
else:
old_instances = instances
for i in instances:
if i in initial_instances:
instances_to_terminate.append(i)
return instances_to_terminate
# set all instances given to unhealthy
for instance_id in old_instances:
connection.set_instance_health(instance_id,'Unhealthy')
def terminate_batch(connection, module, replace_instances, initial_instances, leftovers=False):
batch_size = module.params.get('replace_batch_size')
min_size = module.params.get('min_size')
desired_capacity = module.params.get('desired_capacity')
group_name = module.params.get('name')
wait_timeout = int(module.params.get('wait_timeout'))
lc_check = module.params.get('lc_check')
decrement_capacity = False
break_loop = False
as_group = connection.get_all_groups(names=[group_name])[0]
props = get_properties(as_group)
desired_size = as_group.min_size
new_instances, old_instances = get_instances_by_lc(props, lc_check, initial_instances)
num_new_inst_needed = desired_capacity - len(new_instances)
# check to make sure instances given are actually in the given ASG
# and they have a non-current launch config
instances_to_terminate = list_purgeable_instances(props, lc_check, replace_instances, initial_instances)
log.debug("new instances needed: {0}".format(num_new_inst_needed))
log.debug("new instances: {0}".format(new_instances))
log.debug("old instances: {0}".format(old_instances))
log.debug("batch instances: {0}".format(",".join(instances_to_terminate)))
if num_new_inst_needed == 0:
decrement_capacity = True
if as_group.min_size != min_size:
as_group.min_size = min_size
as_group.update()
log.debug("Updating minimum size back to original of {0}".format(min_size))
# if there are some leftover old instances, but we are already at capacity with new ones
# we don't want to decrement capacity
if leftovers:
decrement_capacity = False
break_loop = True
instances_to_terminate = old_instances
desired_size = min_size
log.debug("No new instances needed")
if num_new_inst_needed < batch_size and num_new_inst_needed !=0 :
instances_to_terminate = instances_to_terminate[:num_new_inst_needed]
decrement_capacity = False
break_loop = False
log.debug("{0} new instances needed".format(num_new_inst_needed))
log.debug("decrementing capacity: {0}".format(decrement_capacity))
for instance_id in instances_to_terminate:
elb_dreg(connection, module, group_name, instance_id)
log.debug("terminating instance: {0}".format(instance_id))
connection.terminate_instance(instance_id, decrement_capacity=decrement_capacity)
# we wait to make sure the machines we marked as Unhealthy are
# no longer in the list
return break_loop, desired_size, instances_to_terminate
def wait_for_term_inst(connection, module, term_instances):
batch_size = module.params.get('replace_batch_size')
wait_timeout = module.params.get('wait_timeout')
group_name = module.params.get('name')
lc_check = module.params.get('lc_check')
as_group = connection.get_all_groups(names=[group_name])[0]
props = get_properties(as_group)
count = 1
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time() and count > 0:
log.debug("waiting for instances to terminate")
count = 0
as_group = connection.get_all_groups(names=[group_name])[0]
props = get_properties(as_group)
instance_facts = props['instance_facts']
instances = ( i for i in instance_facts if i in old_instances)
instances = ( i for i in instance_facts if i in term_instances)
for i in instances:
if ( instance_facts[i]['lifecycle_state'] == 'Terminating'
or instance_facts[i]['health_status'] == 'Unhealthy' ):
lifecycle = instance_facts[i]['lifecycle_state']
health = instance_facts[i]['health_status']
log.debug("Instance {0} has state of {1},{2}".format(i,lifecycle,health ))
if lifecycle == 'Terminating' or health == 'Unhealthy':
count += 1
time.sleep(10)
@ -570,21 +733,24 @@ def terminate_batch(connection, module, replace_instances):
# waiting took too long
module.fail_json(msg = "Waited too long for old instances to terminate. %s" % time.asctime())
def wait_for_new_instances(module, connection, group_name, wait_timeout, desired_size, prop):
def wait_for_new_inst(module, connection, group_name, wait_timeout, desired_size, prop):
# make sure we have the latest stats after that last loop.
as_group = connection.get_all_groups(names=[group_name])[0]
props = get_properties(as_group)
log.debug("Waiting for {0} = {1}, currently {2}".format(prop, desired_size, props[prop]))
# now we make sure that we have enough instances in a viable state
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time() and desired_size > props[prop]:
log.debug("Waiting for {0} = {1}, currently {2}".format(prop, desired_size, props[prop]))
time.sleep(10)
as_group = connection.get_all_groups(names=[group_name])[0]
props = get_properties(as_group)
if wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg = "Waited too long for new instances to become viable. %s" % time.asctime())
log.debug("Reached {0}: {1}".format(prop, desired_size))
return props
def main():

@ -37,25 +37,21 @@ options:
version_added: "1.4"
reuse_existing_ip_allowed:
description:
- Reuse an EIP that is not associated to an instance (when available), instead of allocating a new one.
- Reuse an EIP that is not associated to an instance (when available),'''
''' instead of allocating a new one.
required: false
default: false
version_added: "1.6"
wait_timeout:
description:
- how long to wait in seconds for newly provisioned EIPs to become available
default: 300
version_added: "1.7"
extends_documentation_fragment: aws
author: Lorin Hochstein <lorin@nimbisservices.com>
author: "Lorin Hochstein (@lorin) <lorin@nimbisservices.com>"
notes:
- This module will return C(public_ip) on success, which will contain the
public IP address associated with the instance.
- There may be a delay between the time the Elastic IP is assigned and when
the cloud instance is reachable via the new address. Use wait_for and pause
to delay further playbook execution until the instance is reachable, if
necessary.
the cloud instance is reachable via the new address. Use wait_for and
pause to delay further playbook execution until the instance is reachable,
if necessary.
'''
EXAMPLES = '''
@ -74,8 +70,12 @@ EXAMPLES = '''
- name: output the IP
debug: msg="Allocated IP is {{ eip.public_ip }}"
- name: another way of allocating an elastic IP without associating it to anything
ec2_eip: state='present'
- name: provision new instances with ec2
ec2: keypair=mykey instance_type=c1.medium image=ami-40603AD1 wait=yes group=webserver count=3
ec2: keypair=mykey instance_type=c1.medium image=emi-40603AD1 wait=yes'''
''' group=webserver count=3
register: ec2
- name: associate new elastic IPs with each of the instances
ec2_eip: "instance_id={{ item }}"
@ -95,155 +95,164 @@ except ImportError:
HAS_BOTO = False
wait_timeout = 0
class EIPException(Exception):
pass
def associate_ip_and_instance(ec2, address, instance_id, module):
if ip_is_associated_with_instance(ec2, address.public_ip, instance_id, module):
module.exit_json(changed=False, public_ip=address.public_ip)
# If we're in check mode, nothing else to do
if module.check_mode:
module.exit_json(changed=True)
def associate_ip_and_instance(ec2, address, instance_id, check_mode):
if address_is_associated_with_instance(ec2, address, instance_id):
return {'changed': False}
try:
if address.domain == "vpc":
res = ec2.associate_address(instance_id, allocation_id=address.allocation_id)
# If we're in check mode, nothing else to do
if not check_mode:
if address.domain == 'vpc':
res = ec2.associate_address(instance_id,
allocation_id=address.allocation_id)
else:
res = ec2.associate_address(instance_id, public_ip=address.public_ip)
except boto.exception.EC2ResponseError, e:
module.fail_json(msg=str(e))
if res:
module.exit_json(changed=True, public_ip=address.public_ip)
else:
module.fail_json(msg="association failed")
res = ec2.associate_address(instance_id,
public_ip=address.public_ip)
if not res:
raise EIPException('association failed')
return {'changed': True}
def disassociate_ip_and_instance(ec2, address, instance_id, module):
if not ip_is_associated_with_instance(ec2, address.public_ip, instance_id, module):
module.exit_json(changed=False, public_ip=address.public_ip)
# If we're in check mode, nothing else to do
if module.check_mode:
module.exit_json(changed=True)
def disassociate_ip_and_instance(ec2, address, instance_id, check_mode):
if not address_is_associated_with_instance(ec2, address, instance_id):
return {'changed': False}
try:
if address.domain == "vpc":
res = ec2.disassociate_address(association_id=address.association_id)
# If we're in check mode, nothing else to do
if not check_mode:
if address.domain == 'vpc':
res = ec2.disassociate_address(
association_id=address.association_id)
else:
res = ec2.disassociate_address(public_ip=address.public_ip)
except boto.exception.EC2ResponseError, e:
module.fail_json(msg=str(e))
if res:
module.exit_json(changed=True)
else:
module.fail_json(msg="disassociation failed")
def find_address(ec2, public_ip, module):
""" Find an existing Elastic IP address """
if wait_timeout != 0:
timeout = time.time() + wait_timeout
while timeout > time.time():
try:
addresses = ec2.get_all_addresses([public_ip])
break
except boto.exception.EC2ResponseError, e:
if "Address '%s' not found." % public_ip in e.message :
pass
else:
module.fail_json(msg=str(e.message))
time.sleep(5)
if timeout <= time.time():
module.fail_json(msg = "wait for EIPs timeout on %s" % time.asctime())
else:
try:
addresses = ec2.get_all_addresses([public_ip])
except boto.exception.EC2ResponseError, e:
module.fail_json(msg=str(e.message))
if not res:
raise EIPException('disassociation failed')
return addresses[0]
return {'changed': True}
def ip_is_associated_with_instance(ec2, public_ip, instance_id, module):
def _find_address_by_ip(ec2, public_ip):
try:
return ec2.get_all_addresses([public_ip])[0]
except boto.exception.EC2ResponseError as e:
if "Address '{}' not found.".format(public_ip) not in e.message:
raise
def _find_address_by_instance_id(ec2, instance_id):
addresses = ec2.get_all_addresses(None, {'instance-id': instance_id})
if addresses:
return addresses[0]
def find_address(ec2, public_ip, instance_id):
""" Find an existing Elastic IP address """
if public_ip:
return _find_address_by_ip(ec2, public_ip)
elif instance_id:
return _find_address_by_instance_id(ec2, instance_id)
def address_is_associated_with_instance(ec2, address, instance_id):
""" Check if the elastic IP is currently associated with the instance """
address = find_address(ec2, public_ip, module)
if address:
return address.instance_id == instance_id
else:
return False
return address and address.instance_id == instance_id
return False
def allocate_address(ec2, domain, module, reuse_existing_ip_allowed):
def allocate_address(ec2, domain, reuse_existing_ip_allowed):
""" Allocate a new elastic IP address (when needed) and return it """
# If we're in check mode, nothing else to do
if module.check_mode:
module.exit_json(change=True)
if reuse_existing_ip_allowed:
if domain:
domain_filter = { 'domain' : domain }
else:
domain_filter = { 'domain' : 'standard' }
all_addresses = ec2.get_all_addresses(filters=domain_filter)
unassociated_addresses = filter(lambda a: a.instance_id == "", all_addresses)
if unassociated_addresses:
address = unassociated_addresses[0];
else:
address = ec2.allocate_address(domain=domain)
else:
address = ec2.allocate_address(domain=domain)
domain_filter = {'domain': domain or 'standard'}
all_addresses = ec2.get_all_addresses(filters=domain_filter)
return address
unassociated_addresses = [a for a in all_addresses
if not a.instance_id]
if unassociated_addresses:
return unassociated_addresses[0]
return ec2.allocate_address(domain=domain)
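Review note: a sketch of the reuse path just above; the region is a placeholder:

import boto.ec2

ec2 = boto.ec2.connect_to_region('us-east-1')
# List addresses in the right domain, keep only unattached ones.
free = [a for a in ec2.get_all_addresses(filters={'domain': 'vpc'})
        if not a.instance_id]
print(free[0].public_ip if free else 'none available')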
def release_address(ec2, public_ip, module):
def release_address(ec2, address, check_mode):
""" Release a previously allocated elastic IP address """
address = find_address(ec2, public_ip, module)
# If we're in check mode, nothing else to do
if module.check_mode:
module.exit_json(change=True)
res = address.release()
if res:
module.exit_json(changed=True)
else:
module.fail_json(msg="release failed")
if not check_mode:
if not address.release():
raise EIPException('release failed')
return {'changed': True}
def find_instance(ec2, instance_id, module):
def find_instance(ec2, instance_id):
""" Attempt to find the EC2 instance and return it """
try:
reservations = ec2.get_all_reservations(instance_ids=[instance_id])
except boto.exception.EC2ResponseError, e:
module.fail_json(msg=str(e))
reservations = ec2.get_all_reservations(instance_ids=[instance_id])
if len(reservations) == 1:
instances = reservations[0].instances
if len(instances) == 1:
return instances[0]
module.fail_json(msg="could not find instance" + instance_id)
raise EIPException("could not find instance" + instance_id)
def ensure_present(ec2, domain, address, instance_id,
reuse_existing_ip_allowed, check_mode):
changed = False
# Return the EIP object since we've been given a public IP
if not address:
if check_mode:
return {'changed': True}
address = allocate_address(ec2, domain, reuse_existing_ip_allowed)
changed = True
if instance_id:
# Allocate an IP for instance since no public_ip was provided
instance = find_instance(ec2, instance_id)
if instance.vpc_id:
domain = 'vpc'
# Associate address object (provided or allocated) with instance
assoc_result = associate_ip_and_instance(ec2, address, instance_id,
check_mode)
changed = changed or assoc_result['changed']
return {'changed': changed, 'public_ip': address.public_ip}
def ensure_absent(ec2, domain, address, instance_id, check_mode):
if not address:
return {'changed': False}
# disassociating address from instance
if instance_id:
return disassociate_ip_and_instance(ec2, address, instance_id,
check_mode)
# releasing address
else:
return release_address(ec2, address, check_mode)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
instance_id = dict(required=False),
public_ip = dict(required=False, aliases= ['ip']),
state = dict(required=False, default='present',
choices=['present', 'absent']),
in_vpc = dict(required=False, type='bool', default=False),
reuse_existing_ip_allowed = dict(required=False, type='bool', default=False),
wait_timeout = dict(default=300),
)
)
instance_id=dict(required=False),
public_ip=dict(required=False, aliases=['ip']),
state=dict(required=False, default='present',
choices=['present', 'absent']),
in_vpc=dict(required=False, type='bool', default=False),
reuse_existing_ip_allowed=dict(required=False, type='bool',
default=False),
wait_timeout=dict(default=300),
))
module = AnsibleModule(
argument_spec=argument_spec,
@ -259,46 +268,27 @@ def main():
public_ip = module.params.get('public_ip')
state = module.params.get('state')
in_vpc = module.params.get('in_vpc')
domain = "vpc" if in_vpc else None
domain = 'vpc' if in_vpc else None
reuse_existing_ip_allowed = module.params.get('reuse_existing_ip_allowed')
new_eip_timeout = int(module.params.get('wait_timeout'))
if state == 'present':
# Allocate an EIP and exit
if not instance_id and not public_ip:
address = allocate_address(ec2, domain, module, reuse_existing_ip_allowed)
module.exit_json(changed=True, public_ip=address.public_ip)
# Return the EIP object since we've been given a public IP
if public_ip:
address = find_address(ec2, public_ip, module)
# Allocate an IP for instance since no public_ip was provided
if instance_id and not public_ip:
instance = find_instance(ec2, instance_id, module)
if instance.vpc_id:
domain = "vpc"
address = allocate_address(ec2, domain, module, reuse_existing_ip_allowed)
# overriding the timeout since this is a newly provisioned ip
global wait_timeout
wait_timeout = new_eip_timeout
# Associate address object (provided or allocated) with instance
associate_ip_and_instance(ec2, address, instance_id, module)
try:
address = find_address(ec2, public_ip, instance_id)
else:
#disassociating address from instance
if instance_id:
address = find_address(ec2, public_ip, module)
disassociate_ip_and_instance(ec2, address, instance_id, module)
#releasing address
if state == 'present':
result = ensure_present(ec2, domain, address, instance_id,
reuse_existing_ip_allowed,
module.check_mode)
else:
release_address(ec2, public_ip, module)
result = ensure_absent(ec2, domain, address, instance_id, module.check_mode)
except (boto.exception.EC2ResponseError, EIPException) as e:
module.fail_json(msg=str(e))
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
from ansible.module_utils.basic import * # noqa
from ansible.module_utils.ec2 import * # noqa
if __name__ == '__main__':
main()

@ -25,7 +25,7 @@ description:
if state=absent is passed as an argument.
- Will be marked changed when called only if there are ELBs found to operate on.
version_added: "1.2"
author: John Jarvis
author: "John Jarvis (@jarv)"
options:
state:
description:
@ -103,6 +103,7 @@ import time
try:
import boto
import boto.ec2
import boto.ec2.autoscale
import boto.ec2.elb
from boto.regioninfo import RegionInfo
HAS_BOTO = True
@ -129,9 +130,9 @@ class ElbManager:
for lb in self.lbs:
initial_state = self._get_instance_health(lb)
if initial_state is None:
# The instance isn't registered with this ELB so just
# return unchanged
return
# Instance isn't registered with this load
# balancer. Ignore it and try the next one.
continue
lb.deregister_instances([self.instance_id])
@ -254,6 +255,9 @@ class ElbManager:
for elb lookup instead of returning what elbs
are attached to self.instance_id"""
if not ec2_elbs:
ec2_elbs = self._get_auto_scaling_group_lbs()
try:
elb = connect_to_aws(boto.ec2.elb, self.region,
**self.aws_connect_params)
@ -272,6 +276,32 @@ class ElbManager:
lbs.append(lb)
return lbs
def _get_auto_scaling_group_lbs(self):
"""Returns a list of ELBs associated with self.instance_id
indirectly through its auto scaling group membership"""
try:
asg = connect_to_aws(boto.ec2.autoscale, self.region, **self.aws_connect_params)
except (boto.exception.NoAuthHandlerFound, StandardError), e:
self.module.fail_json(msg=str(e))
asg_instances = asg.get_all_autoscaling_instances([self.instance_id])
if len(asg_instances) > 1:
self.module.fail_json(msg="Illegal state, expected one auto scaling group instance.")
if not asg_instances:
asg_elbs = []
else:
asg_name = asg_instances[0].group_name
asgs = asg.get_all_groups([asg_name])
            if len(asgs) != 1:
self.module.fail_json(msg="Illegal state, expected one auto scaling group.")
asg_elbs = asgs[0].load_balancers
return asg_elbs
def _get_instance(self):
"""Returns a boto.ec2.InstanceObject for self.instance_id"""
try:

@ -22,7 +22,7 @@ description:
- Will be marked changed when called only if state is changed.
short_description: Creates or destroys Amazon ELB.
version_added: "1.5"
author: Jim Dalton
author: "Jim Dalton (@jsdalton)"
options:
state:
description:
@ -58,7 +58,7 @@ options:
version_added: "1.6"
health_check:
description:
- An associative array of health check configuration settigs (see example)
- An associative array of health check configuration settings (see example)
    required: false
default: None
region:
@ -101,12 +101,17 @@ options:
version_added: "1.8"
cross_az_load_balancing:
description:
- Distribute load across all configured Availablity Zones
- Distribute load across all configured Availability Zones
required: false
default: "no"
choices: ["yes", "no"]
aliases: []
version_added: "1.8"
stickiness:
description:
      - An associative array of stickiness policy settings. Policy will be applied to all listeners (see example).
required: false
version_added: "2.0"
extends_documentation_fragment: aws
"""
@ -193,7 +198,7 @@ EXAMPLES = """
purge_listeners: no
# Normally, this module will leave availability zones that are enabled
# on the ELB alone. If purge_zones is true, then any extreneous zones
# on the ELB alone. If purge_zones is true, then any extraneous zones
# will be removed
- local_action:
module: ec2_elb_lb
@ -235,9 +240,46 @@ EXAMPLES = """
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
      - protocol: http
        load_balancer_port: 80
        instance_port: 80
# Create an ELB with load balancer stickiness enabled
- local_action:
module: ec2_elb_lb
name: "New ELB"
state: present
region: us-east-1
zones:
- us-east-1a
- us-east-1d
listeners:
      - protocol: http
        load_balancer_port: 80
        instance_port: 80
stickiness:
type: loadbalancer
enabled: yes
expiration: 300
# Create an ELB with application stickiness enabled
- local_action:
module: ec2_elb_lb
name: "New ELB"
state: present
region: us-east-1
zones:
- us-east-1a
- us-east-1d
listeners:
      - protocol: http
        load_balancer_port: 80
        instance_port: 80
stickiness:
type: application
enabled: yes
cookie: SESSIONID
"""
try:
@ -258,7 +300,8 @@ class ElbManager(object):
zones=None, purge_zones=None, security_group_ids=None,
health_check=None, subnets=None, purge_subnets=None,
scheme="internet-facing", connection_draining_timeout=None,
cross_az_load_balancing=None, region=None, **aws_connect_params):
cross_az_load_balancing=None,
stickiness=None, region=None, **aws_connect_params):
self.module = module
self.name = name
@ -273,6 +316,7 @@ class ElbManager(object):
self.scheme = scheme
self.connection_draining_timeout = connection_draining_timeout
self.cross_az_load_balancing = cross_az_load_balancing
self.stickiness = stickiness
self.aws_connect_params = aws_connect_params
self.region = region
@ -300,6 +344,8 @@ class ElbManager(object):
self._set_connection_draining_timeout()
if self._check_attribute_support('cross_zone_load_balancing'):
self._set_cross_az_load_balancing()
        # add stickiness options
self.select_stickiness_policy()
def ensure_gone(self):
"""Destroy the ELB"""
@ -318,6 +364,15 @@ class ElbManager(object):
'status': self.status
}
else:
try:
lb_cookie_policy = check_elb.policies.lb_cookie_stickiness_policies[0].__dict__['policy_name']
except:
lb_cookie_policy = None
try:
app_cookie_policy = check_elb.policies.app_cookie_stickiness_policies[0].__dict__['policy_name']
except:
app_cookie_policy = None
info = {
'name': check_elb.name,
'dns_name': check_elb.dns_name,
@ -327,7 +382,9 @@ class ElbManager(object):
'subnets': self.subnets,
'scheme': check_elb.scheme,
'hosted_zone_name': check_elb.canonical_hosted_zone_name,
'hosted_zone_id': check_elb.canonical_hosted_zone_name_id
'hosted_zone_id': check_elb.canonical_hosted_zone_name_id,
'lb_cookie_policy': lb_cookie_policy,
'app_cookie_policy': app_cookie_policy
}
if check_elb.health_check:
@ -361,6 +418,8 @@ class ElbManager(object):
else:
info['cross_az_load_balancing'] = 'no'
# return stickiness info?
return info
def _get_elb(self):
@ -428,7 +487,7 @@ class ElbManager(object):
existing_listener_found = None
for existing_listener in self.elb.listeners:
# Since ELB allows only one listener on each incoming port, a
# single match on the incomping port is all we're looking for
# single match on the incoming port is all we're looking for
if existing_listener[0] == listener['load_balancer_port']:
existing_listener_found = self._api_listener_as_tuple(existing_listener)
break
@ -437,7 +496,7 @@ class ElbManager(object):
# Does it match exactly?
if listener_as_tuple != existing_listener_found:
# The ports are the same but something else is different,
# so we'll remove the exsiting one and add the new one
# so we'll remove the existing one and add the new one
listeners_to_remove.append(existing_listener_found)
listeners_to_add.append(listener_as_tuple)
else:
@ -615,6 +674,103 @@ class ElbManager(object):
attributes.connection_draining.enabled = False
self.elb_conn.modify_lb_attribute(self.name, 'ConnectionDraining', attributes.connection_draining)
def _policy_name(self, policy_type):
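        # builds a per-module policy name from this file's basename, e.g.
        # 'ec2-elb-lb.py-LBCookieStickinessPolicyType' (assuming the module
        # file is named ec2_elb_lb.py)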
return __file__.split('/')[-1].replace('_', '-') + '-' + policy_type
def _create_policy(self, policy_param, policy_meth, policy):
        getattr(self.elb_conn, policy_meth)(policy_param, self.elb.name, policy)
def _delete_policy(self, elb_name, policy):
self.elb_conn.delete_lb_policy(elb_name, policy)
def _update_policy(self, policy_param, policy_meth, policy_attr, policy):
self._delete_policy(self.elb.name, policy)
self._create_policy(policy_param, policy_meth, policy)
def _set_listener_policy(self, listeners_dict, policy=[]):
for listener_port in listeners_dict:
if listeners_dict[listener_port].startswith('HTTP'):
self.elb_conn.set_lb_policies_of_listener(self.elb.name, listener_port, policy)
def _set_stickiness_policy(self, elb_info, listeners_dict, policy, **policy_attrs):
for p in getattr(elb_info.policies, policy_attrs['attr']):
if str(p.__dict__['policy_name']) == str(policy[0]):
if str(p.__dict__[policy_attrs['dict_key']]) != str(policy_attrs['param_value']):
self._set_listener_policy(listeners_dict)
self._update_policy(policy_attrs['param_value'], policy_attrs['method'], policy_attrs['attr'], policy[0])
self.changed = True
break
else:
self._create_policy(policy_attrs['param_value'], policy_attrs['method'], policy[0])
self.changed = True
self._set_listener_policy(listeners_dict, policy)
def select_stickiness_policy(self):
if self.stickiness:
if 'cookie' in self.stickiness and 'expiration' in self.stickiness:
self.module.fail_json(msg='\'cookie\' and \'expiration\' can not be set at the same time')
elb_info = self.elb_conn.get_all_load_balancers(self.elb.name)[0]
d = {}
for listener in elb_info.listeners:
d[listener[0]] = listener[2]
listeners_dict = d
if self.stickiness['type'] == 'loadbalancer':
policy = []
policy_type = 'LBCookieStickinessPolicyType'
if self.stickiness['enabled'] == True:
if 'expiration' not in self.stickiness:
self.module.fail_json(msg='expiration must be set when type is loadbalancer')
policy_attrs = {
'type': policy_type,
'attr': 'lb_cookie_stickiness_policies',
'method': 'create_lb_cookie_stickiness_policy',
'dict_key': 'cookie_expiration_period',
'param_value': self.stickiness['expiration']
}
policy.append(self._policy_name(policy_attrs['type']))
self._set_stickiness_policy(elb_info, listeners_dict, policy, **policy_attrs)
elif self.stickiness['enabled'] == False:
if len(elb_info.policies.lb_cookie_stickiness_policies):
if elb_info.policies.lb_cookie_stickiness_policies[0].policy_name == self._policy_name(policy_type):
self.changed = True
else:
self.changed = False
self._set_listener_policy(listeners_dict)
self._delete_policy(self.elb.name, self._policy_name(policy_type))
elif self.stickiness['type'] == 'application':
policy = []
policy_type = 'AppCookieStickinessPolicyType'
if self.stickiness['enabled'] == True:
if 'cookie' not in self.stickiness:
self.module.fail_json(msg='cookie must be set when type is application')
policy_attrs = {
'type': policy_type,
'attr': 'app_cookie_stickiness_policies',
'method': 'create_app_cookie_stickiness_policy',
'dict_key': 'cookie_name',
'param_value': self.stickiness['cookie']
}
policy.append(self._policy_name(policy_attrs['type']))
self._set_stickiness_policy(elb_info, listeners_dict, policy, **policy_attrs)
elif self.stickiness['enabled'] == False:
if len(elb_info.policies.app_cookie_stickiness_policies):
if elb_info.policies.app_cookie_stickiness_policies[0].policy_name == self._policy_name(policy_type):
self.changed = True
self._set_listener_policy(listeners_dict)
self._delete_policy(self.elb.name, self._policy_name(policy_type))
else:
self._set_listener_policy(listeners_dict)
def _get_health_check_target(self):
"""Compose target string from healthcheck parameters"""
protocol = self.health_check['ping_protocol'].upper()
@ -641,7 +797,8 @@ def main():
purge_subnets={'default': False, 'required': False, 'type': 'bool'},
scheme={'default': 'internet-facing', 'required': False},
connection_draining_timeout={'default': None, 'required': False},
cross_az_load_balancing={'default': None, 'required': False}
cross_az_load_balancing={'default': None, 'required': False},
stickiness={'default': None, 'required': False, 'type': 'dict'}
)
)
@ -669,6 +826,7 @@ def main():
scheme = module.params['scheme']
connection_draining_timeout = module.params['connection_draining_timeout']
cross_az_load_balancing = module.params['cross_az_load_balancing']
stickiness = module.params['stickiness']
if state == 'present' and not listeners:
module.fail_json(msg="At least one port is required for ELB creation")
@ -680,6 +838,7 @@ def main():
purge_zones, security_group_ids, health_check,
subnets, purge_subnets, scheme,
connection_draining_timeout, cross_az_load_balancing,
stickiness,
region=region, **aws_connect_params)
# check for unsupported attributes for this version of boto

@ -36,7 +36,7 @@ description:
The module must be called from within the EC2 instance itself.
notes:
- Parameters to filter on ec2_facts may be added later.
author: "Silviu Dicu <silviudicu@gmail.com>"
author: "Silviu Dicu (@silviud) <silviudicu@gmail.com>"
'''
EXAMPLES = '''

@ -5,6 +5,7 @@
DOCUMENTATION = '''
---
module: ec2_group
author: "Andrew de Quincey (@adq)"
version_added: "1.3"
short_description: maintain an ec2 VPC security group.
description:
@ -24,15 +25,11 @@ options:
required: false
rules:
description:
- List of firewall inbound rules to enforce in this group (see'''
''' example). If none are supplied, a default all-out rule is assumed.'''
''' If an empty list is supplied, no inbound rules will be enabled.
- List of firewall inbound rules to enforce in this group (see example). If none are supplied, a default all-out rule is assumed. If an empty list is supplied, no inbound rules will be enabled.
required: false
rules_egress:
description:
- List of firewall outbound rules to enforce in this group (see'''
''' example). If none are supplied, a default all-out rule is assumed.'''
''' If an empty list is supplied, no outbound rules will be enabled.
- List of firewall outbound rules to enforce in this group (see example). If none are supplied, a default all-out rule is assumed. If an empty list is supplied, no outbound rules will be enabled.
required: false
version_added: "1.6"
region:
@ -90,6 +87,14 @@ EXAMPLES = '''
from_port: 22
to_port: 22
cidr_ip: 10.0.0.0/8
- proto: tcp
from_port: 443
to_port: 443
group_id: amazon-elb/sg-87654321/amazon-elb-sg
- proto: tcp
from_port: 3306
to_port: 3306
group_id: 123412341234/sg-87654321/exact-name-of-sg
- proto: udp
from_port: 10050
to_port: 10050
@ -113,6 +118,7 @@ EXAMPLES = '''
try:
import boto.ec2
from boto.ec2.securitygroup import SecurityGroup
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
@ -122,6 +128,11 @@ def make_rule_key(prefix, rule, group_id, cidr_ip):
"""Creates a unique key for an individual group rule"""
if isinstance(rule, dict):
proto, from_port, to_port = [rule.get(x, None) for x in ('proto', 'from_port', 'to_port')]
        # fix for 11177: ports are meaningless for protocols other than
        # icmp/tcp/udp, so normalise them so local and remote rule keys match
if proto not in ['icmp', 'tcp', 'udp'] and from_port == -1 and to_port == -1:
from_port = 'none'
to_port = 'none'
else: # isinstance boto.ec2.securitygroup.IPPermissions
proto, from_port, to_port = [getattr(rule, x, None) for x in ('ip_protocol', 'from_port', 'to_port')]
@ -135,6 +146,22 @@ def addRulesToLookup(rules, prefix, dict):
dict[make_rule_key(prefix, rule, grant.group_id, grant.cidr_ip)] = (rule, grant)
def validate_rule(module, rule):
VALID_PARAMS = ('cidr_ip',
'group_id', 'group_name', 'group_desc',
'proto', 'from_port', 'to_port')
for k in rule:
if k not in VALID_PARAMS:
module.fail_json(msg='Invalid rule parameter \'{}\''.format(k))
if 'group_id' in rule and 'cidr_ip' in rule:
module.fail_json(msg='Specify group_id OR cidr_ip, not both')
elif 'group_name' in rule and 'cidr_ip' in rule:
module.fail_json(msg='Specify group_name OR cidr_ip, not both')
elif 'group_id' in rule and 'group_name' in rule:
module.fail_json(msg='Specify group_id OR group_name, not both')
def get_target_from_rule(module, ec2, rule, name, group, groups, vpc_id):
"""
Returns tuple of (group_id, ip) after validating rule params.
@ -148,6 +175,7 @@ def get_target_from_rule(module, ec2, rule, name, group, groups, vpc_id):
group_id or a non-None ip range.
"""
FOREIGN_SECURITY_GROUP_REGEX = '^(\S+)/(sg-\S+)/(\S+)'
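    # e.g. 'amazon-elb/sg-87654321/amazon-elb-sg' parses into
    # (owner_id, group_id, group_name)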
group_id = None
group_name = None
ip = None
@ -158,16 +186,22 @@ def get_target_from_rule(module, ec2, rule, name, group, groups, vpc_id):
module.fail_json(msg="Specify group_name OR cidr_ip, not both")
elif 'group_id' in rule and 'group_name' in rule:
module.fail_json(msg="Specify group_id OR group_name, not both")
elif 'group_id' in rule and re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']):
# this is a foreign Security Group. Since you can't fetch it you must create an instance of it
owner_id, group_id, group_name = re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']).groups()
group_instance = SecurityGroup(owner_id=owner_id, name=group_name, id=group_id)
groups[group_id] = group_instance
groups[group_name] = group_instance
elif 'group_id' in rule:
group_id = rule['group_id']
elif 'group_name' in rule:
group_name = rule['group_name']
if group_name in groups:
group_id = groups[group_name].id
elif group_name == name:
if group_name == name:
group_id = group.id
groups[group_id] = group
groups[group_name] = group
elif group_name in groups:
group_id = groups[group_name].id
else:
if not rule.get('group_desc', '').strip():
module.fail_json(msg="group %s will be automatically created by rule %s and no description was provided" % (group_name, rule))
@ -223,7 +257,12 @@ def main():
groups = {}
for curGroup in ec2.get_all_security_groups():
groups[curGroup.id] = curGroup
groups[curGroup.name] = curGroup
if curGroup.name in groups:
# Prioritise groups from the current VPC
if vpc_id is None or curGroup.vpc_id == vpc_id:
groups[curGroup.name] = curGroup
else:
groups[curGroup.name] = curGroup
if curGroup.name == name and (vpc_id is None or curGroup.vpc_id == vpc_id):
group = curGroup
@ -286,6 +325,8 @@ def main():
# Now, go through all provided rules and ensure they are there.
if rules is not None:
for rule in rules:
validate_rule(module, rule)
group_id, ip, target_group_created = get_target_from_rule(module, ec2, rule, name, group, groups, vpc_id)
if target_group_created:
changed = True
@ -314,6 +355,11 @@ def main():
for (rule, grant) in groupRules.itervalues() :
grantGroup = None
if grant.group_id:
if grant.owner_id != group.owner_id:
# this is a foreign Security Group. Since you can't fetch it you must create an instance of it
group_instance = SecurityGroup(owner_id=grant.owner_id, name=grant.name, id=grant.group_id)
groups[grant.group_id] = group_instance
groups[grant.name] = group_instance
grantGroup = groups[grant.group_id]
if not module.check_mode:
group.revoke(rule.ip_protocol, rule.from_port, rule.to_port, grant.cidr_ip, grantGroup)
@ -326,6 +372,8 @@ def main():
# Now, go through all provided rules and ensure they are there.
if rules_egress is not None:
for rule in rules_egress:
validate_rule(module, rule)
group_id, ip, target_group_created = get_target_from_rule(module, ec2, rule, name, group, groups, vpc_id)
if target_group_created:
changed = True

@ -46,7 +46,7 @@ options:
version_added: "1.6"
extends_documentation_fragment: aws
author: Vincent Viallet
author: "Vincent Viallet (@zbal)"
'''
EXAMPLES = '''

@ -26,7 +26,7 @@ notes:
after it is changed will not modify the launch configuration on AWS. You must create a new config and assign
it to the ASG instead."
version_added: "1.6"
author: Gareth Rushgrove
author: "Gareth Rushgrove (@garethr)"
options:
state:
description:
@ -126,6 +126,12 @@ EXAMPLES = '''
key_name: default
security_groups: ['group', 'group2' ]
instance_type: t1.micro
volumes:
- device_name: /dev/sda1
volume_size: 100
device_type: io1
iops: 3000
delete_on_termination: true
'''

@ -21,7 +21,7 @@ description:
- Can create or delete AWS metric alarms
- Metrics you wish to alarm on must already exist
version_added: "1.6"
author: Zacharie Eakin
author: "Zacharie Eakin (@zeekin)"
options:
state:
description:
@ -29,7 +29,7 @@ options:
required: true
choices: ['present', 'absent']
name:
desciption:
description:
- Unique name for the alarm
required: true
metric:
@ -71,7 +71,7 @@ options:
options: ['Seconds','Microseconds','Milliseconds','Bytes','Kilobytes','Megabytes','Gigabytes','Terabytes','Bits','Kilobits','Megabits','Gigabits','Terabits','Percent','Count','Bytes/Second','Kilobytes/Second','Megabytes/Second','Gigabytes/Second','Terabytes/Second','Bits/Second','Kilobits/Second','Megabits/Second','Gigabits/Second','Terabits/Second','Count/Second','None']
description:
description:
- A longer desciption of the alarm
- A longer description of the alarm
required: false
dimensions:
description:
@ -260,7 +260,7 @@ def main():
insufficient_data_actions=dict(type='list'),
ok_actions=dict(type='list'),
state=dict(default='present', choices=['present', 'absent']),
region=dict(aliases=['aws_region', 'ec2_region'], choices=AWS_REGIONS),
region=dict(aliases=['aws_region', 'ec2_region']),
)
)

@ -7,7 +7,7 @@ description:
- Can create or delete scaling policies for autoscaling groups
- Referenced autoscaling groups must already exist
version_added: "1.6"
author: Zacharie Eakin
author: "Zacharie Eakin (@zeekin)"
options:
state:
description:
@ -23,7 +23,7 @@ options:
- Name of the associated autoscaling group
required: true
adjustment_type:
desciption:
description:
- The type of change in capacity of the autoscaling group
required: false
choices: ['ChangeInCapacity','ExactCapacity','PercentChangeInCapacity']
@ -88,7 +88,7 @@ def create_scaling_policy(connection, module):
try:
connection.create_scaling_policy(sp)
policy = connection.get_all_policies(policy_names=[sp_name])[0]
policy = connection.get_all_policies(as_group=asg_name,policy_names=[sp_name])[0]
module.exit_json(changed=True, name=policy.name, arn=policy.policy_arn, as_name=policy.as_name, scaling_adjustment=policy.scaling_adjustment, cooldown=policy.cooldown, adjustment_type=policy.adjustment_type, min_adjustment_step=policy.min_adjustment_step)
except BotoServerError, e:
module.fail_json(msg=str(e))
@ -115,7 +115,7 @@ def create_scaling_policy(connection, module):
try:
if changed:
connection.create_scaling_policy(policy)
policy = connection.get_all_policies(policy_names=[sp_name])[0]
policy = connection.get_all_policies(as_group=asg_name,policy_names=[sp_name])[0]
module.exit_json(changed=changed, name=policy.name, arn=policy.policy_arn, as_name=policy.as_name, scaling_adjustment=policy.scaling_adjustment, cooldown=policy.cooldown, adjustment_type=policy.adjustment_type, min_adjustment_step=policy.min_adjustment_step)
except BotoServerError, e:
module.fail_json(msg=str(e))
@ -147,7 +147,6 @@ def main():
scaling_adjustment = dict(type='int'),
min_adjustment_step = dict(type='int'),
cooldown = dict(type='int'),
region = dict(aliases=['aws_region', 'ec2_region'], choices=AWS_REGIONS),
state=dict(default='present', choices=['present', 'absent']),
)
)

@ -75,7 +75,7 @@ options:
required: false
version_added: "1.9"
author: Will Thames
author: "Will Thames (@willthames)"
extends_documentation_fragment: aws
'''

@ -42,7 +42,7 @@ options:
default: null
aliases: ['aws_region', 'ec2_region']
author: Lester Wade
author: "Lester Wade (@lwade)"
extends_documentation_fragment: aws
'''

@ -107,7 +107,7 @@ options:
default: present
choices: ['absent', 'present', 'list']
version_added: "1.6"
author: Lester Wade
author: "Lester Wade (@lwade)"
extends_documentation_fragment: aws
'''
@ -136,16 +136,16 @@ EXAMPLES = '''
image: "{{ image }}"
wait: yes
count: 3
register: ec2
register: ec2
- ec2_vol:
instance: "{{ item.id }} "
volume_size: 5
with_items: ec2.instances
register: ec2_vol
register: ec2_vol
# Example: Launch an instance and then add a volue if not already present
# Example: Launch an instance and then add a volume if not already attached
# * Volume will be created with the given name if not already created.
# * Nothing will happen if the volume is already attached.
# * Volume must exist in the same zone.
- ec2:
keypair: "{{ keypair }}"
@ -154,14 +154,14 @@ EXAMPLES = '''
id: my_instance
wait: yes
count: 1
register: ec2
register: ec2
- ec2_vol:
instance: "{{ item.id }}"
name: my_existing_volume_Name_tag
device_name: /dev/xvdf
with_items: ec2.instances
register: ec2_vol
with_items: ec2.instances
register: ec2_vol
# Remove a volume
- ec2_vol:
@ -215,7 +215,13 @@ def get_volume(module, ec2):
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
if not vols:
module.fail_json(msg="Could not find volume in zone (if specified): %s" % name or id)
if id:
msg = "Could not find the volume with id: %s" % id
if name:
msg += (" and name: %s" % name)
module.fail_json(msg=msg)
else:
return None
if len(vols) > 1:
module.fail_json(msg="Found more than one volume in zone (if specified) with name: %s" % name)
return vols[0]
@ -233,15 +239,14 @@ def get_volumes(module, ec2):
return vols
def delete_volume(module, ec2):
vol = get_volume(module, ec2)
if not vol:
module.exit_json(changed=False)
else:
if vol.attachment_state() is not None:
adata = vol.attach_data
module.fail_json(msg="Volume %s is attached to an instance %s." % (vol.id, adata.instance_id))
ec2.delete_volume(vol.id)
module.exit_json(changed=True)
volume_id = module.params['id']
try:
ec2.delete_volume(volume_id)
module.exit_json(changed=True)
except boto.exception.EC2ResponseError as ec2_error:
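        # a volume that is already gone means there is nothing to delete; report no change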
if ec2_error.code == 'InvalidVolume.NotFound':
module.exit_json(changed=False)
module.fail_json(msg=ec2_error.message)
def boto_supports_volume_encryption():
"""
@ -268,12 +273,8 @@ def create_volume(module, ec2, zone):
if instance == 'None' or instance == '':
instance = None
# If no instance supplied, try volume creation based on module parameters.
if name or id:
if iops or volume_size:
module.fail_json(msg = "Parameters are not compatible: [id or name] and [iops or volume_size]")
volume = get_volume(module, ec2)
volume = get_volume(module, ec2)
if volume:
if volume.attachment_state() is not None:
if instance is None:
return volume
@ -297,8 +298,12 @@ def create_volume(module, ec2, zone):
while volume.status != 'available':
time.sleep(3)
volume.update()
if name:
ec2.create_tags([volume.id], {"Name": name})
except boto.exception.BotoServerError, e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
return volume
@ -409,13 +414,10 @@ def main():
module.exit_json(changed=False, volumes=returned_volumes)
if id and name:
module.fail_json(msg="Both id and name cannot be specified")
if encrypted and not boto_supports_volume_encryption():
module.fail_json(msg="You must use boto >= v2.29.0 to use encrypted volumes")
# Here we need to get the zone info for the instance. This covers situation where
# Here we need to get the zone info for the instance. This covers situation where
# instance is specified but zone isn't.
# Useful for playbooks chaining instance launch with volume create + attach and where the
# zone doesn't matter to the user.
@ -437,9 +439,8 @@ def main():
if not volume_size and not (id or name):
module.fail_json(msg="You must specify an existing volume with id or name or a volume_size")
if volume_size and (id or name):
module.fail_json(msg="Cannot specify volume_size and either one of name or id")
if volume_size and id:
module.fail_json(msg="Cannot specify volume_size and id")
if state == 'absent':
delete_volume(module, ec2)

@ -58,7 +58,7 @@ options:
aliases: []
resource_tags:
description:
- 'A dictionary array of resource tags of the form: { tag1: value1, tag2: value2 }. Tags in this list are used in conjunction with CIDR block to uniquely identify a VPC in lieu of vpc_id. Therefore, if CIDR/Tag combination does not exits, a new VPC will be created. VPC tags not on this list will be ignored. Prior to 1.7, specifying a resource tag was optional.'
- 'A dictionary array of resource tags of the form: { tag1: value1, tag2: value2 }. Tags in this list are used in conjunction with CIDR block to uniquely identify a VPC in lieu of vpc_id. Therefore, if CIDR/Tag combination does not exist, a new VPC will be created. VPC tags not on this list will be ignored. Prior to 1.7, specifying a resource tag was optional.'
required: true
default: null
aliases: []
@ -72,7 +72,7 @@ options:
aliases: []
route_tables:
description:
- 'A dictionary array of route tables to add of the form: { subnets: [172.22.2.0/24, 172.22.3.0/24,], routes: [{ dest: 0.0.0.0/0, gw: igw},] }. Where the subnets list is those subnets the route table should be associated with, and the routes list is a list of routes to be in the table. The special keyword for the gw of igw specifies that you should the route should go through the internet gateway attached to the VPC. gw also accepts instance-ids in addition igw. This module is currently unable to affect the "main" route table due to some limitations in boto, so you must explicitly define the associated subnets or they will be attached to the main table implicitly. As of 1.8, if the route_tables parameter is not specified, no existing routes will be modified.'
      - 'A dictionary array of route tables to add of the form: { subnets: [172.22.2.0/24, 172.22.3.0/24,], routes: [{ dest: 0.0.0.0/0, gw: igw},], resource_tags: ... }. Where the subnets list is those subnets the route table should be associated with, and the routes list is a list of routes to be in the table. The special keyword igw for gw specifies that the route should go through the internet gateway attached to the VPC. gw also accepts instance-ids in addition to igw. resource_tags is optional and uses dictionary form: { "Name": "public", ... }. This module is currently unable to affect the "main" route table due to some limitations in boto, so you must explicitly define the associated subnets or they will be attached to the main table implicitly. As of 1.8, if the route_tables parameter is not specified, no existing routes will be modified.'
required: false
default: null
aliases: []
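    # Illustrative shape only (the CIDRs and tag values below are
    # placeholders, not defaults of this module):
    #   route_tables:
    #     - subnets: [172.22.2.0/24, 172.22.3.0/24]
    #       resource_tags: { "Name": "public" }
    #       routes:
    #         - dest: 0.0.0.0/0
    #           gw: igw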
@ -100,7 +100,7 @@ options:
required: true
default: null
aliases: ['aws_region', 'ec2_region']
author: Carson Gee
author: "Carson Gee (@carsongee)"
extends_documentation_fragment: aws
'''
@ -227,6 +227,100 @@ def find_vpc(module, vpc_conn, vpc_id=None, cidr=None):
return (found_vpc)
def routes_match(rt_list=None, rt=None, igw=None):
"""
Check if the route table has all routes as in given list
    rt_list      : A list of routes provided in the module
    rt           : The remote route table object
    igw          : The internet gateway object for this vpc
    Returns:
        True when the provided routes and the remote routes are the same.
        False when the provided routes and the remote routes differ.
"""
local_routes = []
remote_routes = []
for route in rt_list:
route_kwargs = {}
if route['gw'] == 'igw':
route_kwargs['gateway_id'] = igw.id
route_kwargs['instance_id'] = None
route_kwargs['state'] = 'active'
elif route['gw'].startswith('i-'):
route_kwargs['instance_id'] = route['gw']
route_kwargs['gateway_id'] = None
route_kwargs['state'] = 'active'
else:
route_kwargs['gateway_id'] = route['gw']
route_kwargs['instance_id'] = None
route_kwargs['state'] = 'active'
route_kwargs['destination_cidr_block'] = route['dest']
local_routes.append(route_kwargs)
for j in rt.routes:
remote_routes.append(j.__dict__)
match = []
for i in local_routes:
change = "false"
for j in remote_routes:
if set(i.items()).issubset(set(j.items())):
change = "true"
match.append(change)
if 'false' in match:
return False
else:
return True
def rtb_changed(route_tables=None, vpc_conn=None, module=None, vpc=None, igw=None):
"""
Checks if the remote routes match the local routes.
route_tables : Route_tables parameter in the module
    vpc_conn     : The VPC connection object
    module       : The module object
    vpc          : The vpc object for this route table
    igw          : The internet gateway object for this vpc
    Returns:
        True when there is a difference between the provided routes and the remote routes, or when the subnet associations differ.
        False when both the routes and the subnet associations match.
"""
    # We add one for the main table
rtb_len = len(route_tables) + 1
remote_rtb_len = len(vpc_conn.get_all_route_tables(filters={'vpc_id': vpc.id}))
if remote_rtb_len != rtb_len:
return True
for rt in route_tables:
rt_id = None
for sn in rt['subnets']:
rsn = vpc_conn.get_all_subnets(filters={'cidr': sn, 'vpc_id': vpc.id })
if len(rsn) != 1:
module.fail_json(
msg='The subnet {0} to associate with route_table {1} ' \
'does not exist, aborting'.format(sn, rt)
)
nrt = vpc_conn.get_all_route_tables(filters={'vpc_id': vpc.id, 'association.subnet-id': rsn[0].id})
if not nrt:
return True
else:
nrt = nrt[0]
if not rt_id:
rt_id = nrt.id
if not routes_match(rt['routes'], nrt, igw):
return True
continue
else:
if rt_id == nrt.id:
continue
else:
return True
return False
def create_vpc(module, vpc_conn):
"""
Creates a new or modifies an existing VPC.
@ -357,6 +451,7 @@ def create_vpc(module, vpc_conn):
# Handle Internet gateway (create/delete igw)
igw = None
igw_id = None
igws = vpc_conn.get_all_internet_gateways(filters={'attachment.vpc-id': vpc.id})
if len(igws) > 1:
module.fail_json(msg='EC2 returned more than one Internet Gateway for id %s, aborting' % vpc.id)
@ -380,6 +475,9 @@ def create_vpc(module, vpc_conn):
except EC2ResponseError, e:
module.fail_json(msg='Unable to delete Internet Gateway, error: {0}'.format(e))
if igw is not None:
igw_id = igw.id
# Handle route tables - this may be worth splitting into a
# different module but should work fine here. The strategy to stay
    # idempotent is to basically build all the route tables as
@ -391,6 +489,8 @@ def create_vpc(module, vpc_conn):
# the replace-route-table API to make this smoother and
# allow control of the 'main' routing table.
if route_tables is not None:
rtb_needs_change = rtb_changed(route_tables, vpc_conn, module, vpc, igw)
if route_tables is not None and rtb_needs_change:
if not isinstance(route_tables, list):
module.fail_json(msg='route tables need to be a list of dictionaries')
@ -399,6 +499,9 @@ def create_vpc(module, vpc_conn):
for rt in route_tables:
try:
new_rt = vpc_conn.create_route_table(vpc.id)
new_rt_tags = rt.get('resource_tags', None)
if new_rt_tags:
vpc_conn.create_tags(new_rt.id, new_rt_tags)
for route in rt['routes']:
route_kwargs = {}
if route['gw'] == 'igw':
@ -474,6 +577,7 @@ def create_vpc(module, vpc_conn):
module.fail_json(msg='Unable to delete old route table {0}, error: {1}'.format(rt.id, e))
vpc_dict = get_vpc_info(vpc)
created_vpc_id = vpc.id
returned_subnets = []
current_subnets = vpc_conn.get_all_subnets(filters={ 'vpc_id': vpc.id })
@ -486,16 +590,17 @@ def create_vpc(module, vpc_conn):
'id': sn.id,
})
# Sort subnets by the order they were listed in the play
order = {}
for idx, val in enumerate(subnets):
order[val['cidr']] = idx
if subnets is not None:
# Sort subnets by the order they were listed in the play
order = {}
for idx, val in enumerate(subnets):
order[val['cidr']] = idx
# Number of subnets in the play
subnets_in_play = len(subnets)
returned_subnets.sort(key=lambda x: order.get(x['cidr'], subnets_in_play))
# Number of subnets in the play
subnets_in_play = len(subnets)
returned_subnets.sort(key=lambda x: order.get(x['cidr'], subnets_in_play))
return (vpc_dict, created_vpc_id, returned_subnets, changed)
return (vpc_dict, created_vpc_id, returned_subnets, igw_id, changed)
def terminate_vpc(module, vpc_conn, vpc_id=None, cidr=None):
"""
@ -596,6 +701,7 @@ def main():
else:
module.fail_json(msg="region must be specified")
igw_id = None
if module.params.get('state') == 'absent':
vpc_id = module.params.get('vpc_id')
cidr = module.params.get('cidr_block')
@ -603,9 +709,9 @@ def main():
subnets_changed = None
elif module.params.get('state') == 'present':
# Changed is always set to true when provisioning a new VPC
(vpc_dict, new_vpc_id, subnets_changed, changed) = create_vpc(module, vpc_conn)
(vpc_dict, new_vpc_id, subnets_changed, igw_id, changed) = create_vpc(module, vpc_conn)
module.exit_json(changed=changed, vpc_id=new_vpc_id, vpc=vpc_dict, subnets=subnets_changed)
module.exit_json(changed=changed, vpc_id=new_vpc_id, vpc=vpc_dict, igw_id=igw_id, subnets=subnets_changed)
# import module snippets
from ansible.module_utils.basic import *

@ -22,7 +22,7 @@ description:
- Manage cache clusters in Amazon Elasticache.
- Returns information about the specified cache cluster.
version_added: "1.4"
author: Jim Dalton
author: "Jim Dalton (@jsdalton)"
options:
state:
description:
@ -62,7 +62,7 @@ options:
- The subnet group name to associate with. Only use if inside a vpc. Required if inside a vpc
required: conditional
default: None
version_added: "1.7"
version_added: "2.0"
security_group_ids:
description:
- A list of vpc security group names to associate with this cache cluster. Only use if inside a vpc

@ -0,0 +1,157 @@
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: elasticache_subnet_group
version_added: "2.0"
short_description: manage Elasticache subnet groups
description:
- Creates, modifies, and deletes Elasticache subnet groups. This module has a dependency on python-boto >= 2.5.
options:
state:
description:
      - Specifies whether the subnet group should be present or absent.
required: true
default: present
choices: [ 'present' , 'absent' ]
name:
description:
      - Cache subnet group identifier.
required: true
description:
description:
- Elasticache subnet group description. Only set when a new group is added.
required: false
default: null
subnets:
description:
- List of subnet IDs that make up the Elasticache subnet group.
required: false
default: null
region:
description:
- The AWS region to use. If not specified then the value of the AWS_REGION or EC2_REGION environment variable, if any, is used.
required: true
aliases: ['aws_region', 'ec2_region']
author: "Tim Mahoney (@timmahoney)"
extends_documentation_fragment: aws
'''
EXAMPLES = '''
# Add or change a subnet group
- elasticache_subnet_group:
state: present
name: norwegian-blue
description: My Fancy Ex Parrot Subnet Group
subnets:
- subnet-aaaaaaaa
- subnet-bbbbbbbb
# Remove a subnet group
- elasticache_subnet_group:
state: absent
name: norwegian-blue
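
# Update the subnets of an existing group (illustrative; the subnet ids are placeholders)
- elasticache_subnet_group:
    state: present
    name: norwegian-blue
    description: My Fancy Ex Parrot Subnet Group
    subnets:
      - subnet-aaaaaaaa
      - subnet-cccccccc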
'''
try:
import boto
from boto.elasticache.layer1 import ElastiCacheConnection
from boto.regioninfo import RegionInfo
from boto.exception import BotoServerError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state = dict(required=True, choices=['present', 'absent']),
name = dict(required=True),
description = dict(required=False),
subnets = dict(required=False, type='list'),
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
state = module.params.get('state')
group_name = module.params.get('name').lower()
group_description = module.params.get('description')
group_subnets = module.params.get('subnets') or {}
if state == 'present':
for required in ['name', 'description', 'subnets']:
if not module.params.get(required):
module.fail_json(msg = str("Parameter %s required for state='present'" % required))
else:
for not_allowed in ['description', 'subnets']:
if module.params.get(not_allowed):
module.fail_json(msg = str("Parameter %s not allowed for state='absent'" % not_allowed))
# Retrieve any AWS settings from the environment.
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
if not region:
module.fail_json(msg = str("Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set."))
"""Get an elasticache connection"""
try:
endpoint = "elasticache.%s.amazonaws.com" % region
connect_region = RegionInfo(name=region, endpoint=endpoint)
conn = ElastiCacheConnection(region=connect_region, **aws_connect_kwargs)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg=e.message)
try:
changed = False
exists = False
try:
matching_groups = conn.describe_cache_subnet_groups(group_name, max_records=100)
exists = len(matching_groups) > 0
except BotoServerError, e:
if e.error_code != 'CacheSubnetGroupNotFoundFault':
module.fail_json(msg = e.error_message)
if state == 'absent':
if exists:
conn.delete_cache_subnet_group(group_name)
changed = True
else:
if not exists:
new_group = conn.create_cache_subnet_group(group_name, cache_subnet_group_description=group_description, subnet_ids=group_subnets)
changed = True
else:
changed_group = conn.modify_cache_subnet_group(group_name, cache_subnet_group_description=group_description, subnet_ids=group_subnets)
changed = True
except BotoServerError, e:
if e.error_message != 'No modifications were requested.':
module.fail_json(msg = e.error_message)
else:
changed = False
module.exit_json(changed=changed)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()

@ -0,0 +1,714 @@
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: iam
short_description: Manage IAM users, groups, roles and keys
description:
- Allows for the management of IAM users, groups, roles and access keys.
version_added: "2.0"
options:
iam_type:
description:
- Type of IAM resource
required: true
default: null
choices: [ "user", "group", "role"]
name:
description:
- Name of IAM resource to create or identify
required: true
new_name:
description:
- When state is update, will replace name with new_name on IAM resource
required: false
default: null
new_path:
description:
- When state is update, will replace the path with new_path on the IAM resource
required: false
default: null
state:
description:
- Whether to create, delete or update the IAM resource. Note, roles cannot be updated.
required: true
default: null
choices: [ "present", "absent", "update" ]
path:
description:
- When creating or updating, specify the desired path of the resource. If state is present, it will replace the current path to match what is passed in when they do not match.
required: false
default: "/"
access_key_state:
description:
- When type is user, it creates, removes, deactivates or activates a user's access key(s). Note that actions apply only to keys specified.
required: false
default: null
choices: [ "create", "remove", "active", "inactive"]
key_count:
description:
- When access_key_state is create it will ensure this quantity of keys are present. Defaults to 1.
required: false
default: '1'
access_key_ids:
description:
      - A list of the keys that you want impacted by the access_key_state parameter.
groups:
description:
- A list of groups the user should belong to. When update, will gracefully remove groups not listed.
required: false
default: null
password:
description:
      - When type is user and state is present, define the user's login password. Also works with update. Note that this always returns changed.
required: false
default: null
update_password:
required: false
default: always
choices: ['always', 'on_create']
description:
- C(always) will update passwords if they differ. C(on_create) will only set the password for newly created users.
aws_secret_key:
description:
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_secret_key', 'secret_key' ]
aws_access_key:
description:
- AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_access_key', 'access_key' ]
notes:
- 'Currently boto does not support the removal of Managed Policies, the module will error out if your user/group/role has managed policies when you try to do state=absent. They will need to be removed manually.'
author:
- "Jonathan I. Davila (@defionscode)"
- "Paul Seiffert (@seiffert)"
extends_documentation_fragment: aws
'''
EXAMPLES = '''
# Basic user creation example
tasks:
- name: Create two new IAM users with API keys
iam:
iam_type: user
name: "{{ item }}"
state: present
password: "{{ temp_pass }}"
access_key_state: create
with_items:
- jcleese
- mpython
# Advanced example, create two new groups and add the pre-existing user
# jdavila to both groups.
tasks:
- name: Create Two Groups, Mario and Luigi
iam:
iam_type: group
name: "{{ item }}"
state: present
with_items:
- Mario
- Luigi
register: new_groups
- name:
iam:
iam_type: user
name: jdavila
state: update
groups: "{{ item.created_group.group_name }}"
with_items: new_groups.results
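
# Example of how to deactivate one of a user's access keys (illustrative;
# the key id below is the AWS documentation placeholder, not a real key)
- name: Deactivate an access key for jdavila
  iam:
    iam_type: user
    name: jdavila
    state: update
    access_key_state: inactive
    access_key_ids:
      - AKIAIOSFODNN7EXAMPLE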
'''
import json
import itertools
import sys
try:
import boto
import boto.iam
import boto.ec2
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def boto_exception(err):
'''generic error message handler'''
if hasattr(err, 'error_message'):
error = err.error_message
elif hasattr(err, 'message'):
error = err.message
else:
error = '%s: %s' % (Exception, err)
return error
def create_user(module, iam, name, pwd, path, key_state, key_count):
key_qty = 0
keys = []
try:
user_meta = iam.create_user(
name, path).create_user_response.create_user_result.user
changed = True
if pwd is not None:
pwd = iam.create_login_profile(name, pwd)
if key_state in ['create']:
if key_count:
while key_count > key_qty:
keys.append(iam.create_access_key(
user_name=name).create_access_key_response.\
create_access_key_result.\
access_key)
key_qty += 1
else:
keys = None
except boto.exception.BotoServerError, err:
module.fail_json(changed=False, msg=str(err))
else:
user_info = dict(created_user=user_meta, password=pwd, access_keys=keys)
return (user_info, changed)
def delete_user(module, iam, name):
    changed = False
    try:
current_keys = [ck['access_key_id'] for ck in
iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata]
for key in current_keys:
iam.delete_access_key(key, name)
del_meta = iam.delete_user(name).delete_user_response
except boto.exception.BotoServerError, err:
error_msg = boto_exception(err)
if ('must detach all policies first') in error_msg:
for policy in iam.get_all_user_policies(name).list_user_policies_result.policy_names:
iam.delete_user_policy(name, policy)
try:
del_meta = iam.delete_user(name)
except boto.exception.BotoServerError, err:
error_msg = boto_exception(err)
if ('must detach all policies first') in error_msg:
module.fail_json(changed=changed, msg="All inline polices have been removed. Though it appears"
"that %s has Managed Polices. This is not "
"currently supported by boto. Please detach the polices "
"through the console and try again." % name)
else:
module.fail_json(changed=changed, msg=str(err))
else:
changed = True
return del_meta, name, changed
else:
changed = True
return del_meta, name, changed
def update_user(module, iam, name, new_name, new_path, key_state, key_count, keys, pwd, updated):
changed = False
name_change = False
if updated and new_name:
name = new_name
try:
current_keys, status = \
[ck['access_key_id'] for ck in
iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata],\
[ck['status'] for ck in
iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata]
key_qty = len(current_keys)
except boto.exception.BotoServerError, err:
error_msg = boto_exception(err)
if 'cannot be found' in error_msg and updated:
current_keys, status = \
[ck['access_key_id'] for ck in
iam.get_all_access_keys(new_name).list_access_keys_result.access_key_metadata],\
[ck['status'] for ck in
iam.get_all_access_keys(new_name).list_access_keys_result.access_key_metadata]
name = new_name
else:
module.fail_json(changed=False, msg=str(err))
updated_key_list = {}
if new_name or new_path:
c_path = iam.get_user(name).get_user_result.user['path']
if (name != new_name) or (c_path != new_path):
changed = True
try:
if not updated:
user = iam.update_user(
name, new_user_name=new_name, new_path=new_path).update_user_response.response_metadata
else:
user = iam.update_user(
name, new_path=new_path).update_user_response.response_metadata
user['updates'] = dict(
old_username=name, new_username=new_name, old_path=c_path, new_path=new_path)
except boto.exception.BotoServerError, err:
error_msg = boto_exception(err)
module.fail_json(changed=False, msg=str(err))
else:
if not updated:
name_change = True
if pwd:
try:
iam.update_login_profile(name, pwd)
changed = True
except boto.exception.BotoServerError:
try:
iam.create_login_profile(name, pwd)
changed = True
except boto.exception.BotoServerError, err:
error_msg = boto_exception(str(err))
if 'Password does not conform to the account password policy' in error_msg:
module.fail_json(changed=False, msg="Passsword doesn't conform to policy")
else:
module.fail_json(msg=error_msg)
else:
try:
iam.delete_login_profile(name)
changed = True
except boto.exception.BotoServerError:
pass
if key_state == 'create':
try:
while key_count > key_qty:
new_key = iam.create_access_key(
user_name=name).create_access_key_response.create_access_key_result.access_key
key_qty += 1
changed = True
except boto.exception.BotoServerError, err:
module.fail_json(changed=False, msg=str(err))
if keys and key_state:
for access_key in keys:
if access_key in current_keys:
for current_key, current_key_state in zip(current_keys, status):
if key_state != current_key_state.lower():
try:
iam.update_access_key(
access_key, key_state.capitalize(), user_name=name)
except boto.exception.BotoServerError, err:
module.fail_json(changed=False, msg=str(err))
else:
changed = True
if key_state == 'remove':
try:
iam.delete_access_key(access_key, user_name=name)
except boto.exception.BotoServerError, err:
module.fail_json(changed=False, msg=str(err))
else:
changed = True
try:
final_keys, final_key_status = \
[ck['access_key_id'] for ck in
iam.get_all_access_keys(name).
list_access_keys_result.
access_key_metadata],\
[ck['status'] for ck in
iam.get_all_access_keys(name).
list_access_keys_result.
access_key_metadata]
except boto.exception.BotoServerError, err:
module.fail_json(changed=changed, msg=str(err))
for fk, fks in zip(final_keys, final_key_status):
updated_key_list.update({fk: fks})
return name_change, updated_key_list, changed
def set_users_groups(module, iam, name, groups, updated=None,
new_name=None):
""" Sets groups for a user, will purge groups not explictly passed, while
retaining pre-existing groups that also are in the new list.
"""
changed = False
if updated:
name = new_name
try:
orig_users_groups = [og['group_name'] for og in iam.get_groups_for_user(
name).list_groups_for_user_result.groups]
remove_groups = [
rg for rg in frozenset(orig_users_groups).difference(groups)]
new_groups = [
ng for ng in frozenset(groups).difference(orig_users_groups)]
except boto.exception.BotoServerError, err:
module.fail_json(changed=changed, msg=str(err))
else:
if len(orig_users_groups) > 0:
for new in new_groups:
iam.add_user_to_group(new, name)
for rm in remove_groups:
iam.remove_user_from_group(rm, name)
else:
for group in groups:
try:
iam.add_user_to_group(group, name)
except boto.exception.BotoServerError, err:
error_msg = boto_exception(err)
if ('The group with name %s cannot be found.' % group) in error_msg:
module.fail_json(changed=False, msg="Group %s doesn't exist" % group)
if len(remove_groups) > 0 or len(new_groups) > 0:
changed = True
return (groups, changed)
def create_group(module=None, iam=None, name=None, path=None):
changed = False
try:
iam.create_group(
name, path).create_group_response.create_group_result.group
except boto.exception.BotoServerError, err:
module.fail_json(changed=changed, msg=str(err))
else:
changed = True
return name, changed
def delete_group(module=None, iam=None, name=None):
changed = False
try:
iam.delete_group(name)
except boto.exception.BotoServerError, err:
error_msg = boto_exception(err)
if ('must detach all policies first') in error_msg:
for policy in iam.get_all_group_policies(name).list_group_policies_result.policy_names:
iam.delete_group_policy(name, policy)
try:
iam.delete_group(name)
except boto.exception.BotoServerError, err:
error_msg = boto_exception(err)
if ('must detach all policies first') in error_msg:
module.fail_json(changed=changed, msg="All inline polices have been removed. Though it appears"
"that %s has Managed Polices. This is not "
"currently supported by boto. Please detach the polices "
"through the console and try again." % name)
else:
module.fail_json(changed=changed, msg=str(err))
else:
changed = True
else:
changed = True
return changed, name
def update_group(module=None, iam=None, name=None, new_name=None, new_path=None):
changed = False
try:
current_group_path = iam.get_group(
name).get_group_response.get_group_result.group['path']
if new_path:
if current_group_path != new_path:
iam.update_group(name, new_path=new_path)
changed = True
if new_name:
if name != new_name:
iam.update_group(name, new_group_name=new_name, new_path=new_path)
changed = True
name = new_name
except boto.exception.BotoServerError, err:
module.fail_json(changed=changed, msg=str(err))
return changed, name, new_path, current_group_path
def create_role(module, iam, name, path, role_list, prof_list):
changed = False
try:
if name not in role_list:
changed = True
iam.create_role(
name, path=path).create_role_response.create_role_result.role.role_name
if name not in prof_list:
iam.create_instance_profile(name, path=path)
iam.add_role_to_instance_profile(name, name)
except boto.exception.BotoServerError, err:
module.fail_json(changed=changed, msg=str(err))
else:
updated_role_list = [rl['role_name'] for rl in iam.list_roles().list_roles_response.
list_roles_result.roles]
return changed, updated_role_list
def delete_role(module, iam, name, role_list, prof_list):
changed = False
try:
if name in role_list:
cur_ins_prof = [rp['instance_profile_name'] for rp in
iam.list_instance_profiles_for_role(name).
list_instance_profiles_for_role_result.
instance_profiles]
for profile in cur_ins_prof:
iam.remove_role_from_instance_profile(profile, name)
try:
iam.delete_role(name)
except boto.exception.BotoServerError, err:
error_msg = boto_exception(err)
if ('must detach all policies first') in error_msg:
for policy in iam.list_role_policies(name).list_role_policies_result.policy_names:
iam.delete_role_policy(name, policy)
try:
iam.delete_role(name)
except boto.exception.BotoServerError, err:
error_msg = boto_exception(err)
if ('must detach all policies first') in error_msg:
module.fail_json(changed=changed, msg="All inline polices have been removed. Though it appears"
"that %s has Managed Polices. This is not "
"currently supported by boto. Please detach the polices "
"through the console and try again." % name)
else:
module.fail_json(changed=changed, msg=str(err))
else:
changed = True
else:
changed = True
for prof in prof_list:
if name == prof:
iam.delete_instance_profile(name)
except boto.exception.BotoServerError, err:
module.fail_json(changed=changed, msg=str(err))
else:
updated_role_list = [rl['role_name'] for rl in iam.list_roles().list_roles_response.
list_roles_result.roles]
return changed, updated_role_list
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
iam_type=dict(
default=None, required=True, choices=['user', 'group', 'role']),
groups=dict(type='list', default=None, required=False),
state=dict(
default=None, required=True, choices=['present', 'absent', 'update']),
password=dict(default=None, required=False),
update_password=dict(default='always', required=False, choices=['always', 'on_create']),
access_key_state=dict(default=None, required=False, choices=[
'active', 'inactive', 'create', 'remove',
'Active', 'Inactive', 'Create', 'Remove']),
access_key_ids=dict(type='list', default=None, required=False),
key_count=dict(type='int', default=1, required=False),
name=dict(default=None, required=False),
new_name=dict(default=None, required=False),
path=dict(default='/', required=False),
new_path=dict(default=None, required=False)
)
)
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive=[],
)
if not HAS_BOTO:
module.fail_json(msg='This module requires boto, please install it')
state = module.params.get('state').lower()
iam_type = module.params.get('iam_type').lower()
groups = module.params.get('groups')
name = module.params.get('name')
new_name = module.params.get('new_name')
password = module.params.get('password')
update_pw = module.params.get('update_password')
path = module.params.get('path')
new_path = module.params.get('new_path')
key_count = module.params.get('key_count')
key_state = module.params.get('access_key_state')
    key_ids = module.params.get('access_key_ids')

    if key_state:
        key_state = key_state.lower()
        if any([n in key_state for n in ['active', 'inactive']]) and not key_ids:
            module.fail_json(changed=False, msg="At least one access key has to be defined in order"
                                                " to use 'active' or 'inactive'")
if iam_type == 'user' and module.params.get('password') is not None:
pwd = module.params.get('password')
elif iam_type != 'user' and module.params.get('password') is not None:
module.fail_json(msg="a password is being specified when the iam_type "
"is not user. Check parameters")
else:
pwd = None
    if iam_type != 'user' and (module.params.get('access_key_state') is not None or
                               module.params.get('access_key_ids') is not None):
        module.fail_json(msg="the IAM type must be user when IAM access keys "
                             "are being modified. Check parameters")
    if iam_type == 'role' and state == 'update':
        module.fail_json(changed=False, msg="iam_type: role cannot currently be updated; "
                                            "please specify present or absent")
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
try:
iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg=str(e))
result = {}
changed = False
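    # Snapshot the existing IAM users, groups, roles and instance profiles up
    # front so the state changes below can be compared against them.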
orig_group_list = [gl['group_name'] for gl in iam.get_all_groups().
list_groups_result.
groups]
orig_user_list = [ul['user_name'] for ul in iam.get_all_users().
list_users_result.
users]
orig_role_list = [rl['role_name'] for rl in iam.list_roles().list_roles_response.
list_roles_result.
roles]
orig_prof_list = [ap['instance_profile_name'] for ap in iam.list_instance_profiles().
list_instance_profiles_response.
list_instance_profiles_result.
instance_profiles]
if iam_type == 'user':
been_updated = False
user_groups = None
user_exists = any([n in [name, new_name] for n in orig_user_list])
if user_exists:
current_path = iam.get_user(name).get_user_result.user['path']
if not new_path and current_path != path:
new_path = path
path = current_path
if state == 'present' and not user_exists and not new_name:
(meta, changed) = create_user(
module, iam, name, password, path, key_state, key_count)
keys = iam.get_all_access_keys(name).list_access_keys_result.\
access_key_metadata
if groups:
(user_groups, changed) = set_users_groups(
module, iam, name, groups, been_updated, new_name)
module.exit_json(
user_meta=meta, groups=user_groups, keys=keys, changed=changed)
elif state in ['present', 'update'] and user_exists:
if update_pw == 'on_create':
password = None
if name not in orig_user_list and new_name in orig_user_list:
been_updated = True
name_change, key_list, user_changed = update_user(
module, iam, name, new_name, new_path, key_state, key_count, key_ids, password, been_updated)
if name_change and new_name:
orig_name = name
name = new_name
if groups:
user_groups, groups_changed = set_users_groups(
module, iam, name, groups, been_updated, new_name)
if groups_changed == user_changed:
changed = groups_changed
else:
changed = True
else:
changed = user_changed
if new_name and new_path:
module.exit_json(changed=changed, groups=user_groups, old_user_name=orig_name,
new_user_name=new_name, old_path=path, new_path=new_path, keys=key_list)
elif new_name and not new_path and not been_updated:
module.exit_json(
changed=changed, groups=user_groups, old_user_name=orig_name, new_user_name=new_name, keys=key_list)
elif new_name and not new_path and been_updated:
module.exit_json(
changed=changed, groups=user_groups, user_name=new_name, keys=key_list, key_state=key_state)
elif not new_name and new_path:
module.exit_json(
changed=changed, groups=user_groups, user_name=name, old_path=path, new_path=new_path, keys=key_list)
else:
module.exit_json(
changed=changed, groups=user_groups, user_name=name, keys=key_list)
elif state == 'update' and not user_exists:
module.fail_json(
msg="The user %s does not exit. No update made." % name)
elif state == 'absent':
if name in orig_user_list:
set_users_groups(module, iam, name, '')
del_meta, name, changed = delete_user(module, iam, name)
module.exit_json(
deletion_meta=del_meta, deleted_user=name, changed=changed)
else:
module.exit_json(
changed=False, msg="User %s is already absent from your AWS IAM users" % name)
elif iam_type == 'group':
group_exists = name in orig_group_list
if state == 'present' and not group_exists:
new_group, changed = create_group(iam=iam, name=name, path=path)
module.exit_json(changed=changed, group_name=new_group)
elif state in ['present', 'update'] and group_exists:
changed, updated_name, updated_path, cur_path = update_group(
iam=iam, name=name, new_name=new_name, new_path=new_path)
if new_path and new_name:
module.exit_json(changed=changed, old_group_name=name,
new_group_name=updated_name, old_path=cur_path,
new_group_path=updated_path)
if new_path and not new_name:
module.exit_json(changed=changed, group_name=name,
old_path=cur_path,
new_group_path=updated_path)
if not new_path and new_name:
module.exit_json(changed=changed, old_group_name=name,
new_group_name=updated_name, group_path=cur_path)
if not new_path and not new_name:
module.exit_json(
changed=changed, group_name=name, group_path=cur_path)
elif state == 'update' and not group_exists:
module.fail_json(
changed=changed, msg="Update Failed. Group %s doesn't seem to exit!" % name)
elif state == 'absent':
if name in orig_group_list:
removed_group, changed = delete_group(iam=iam, name=name)
module.exit_json(changed=changed, delete_group=removed_group)
else:
module.exit_json(changed=changed, msg="Group already absent")
elif iam_type == 'role':
role_list = []
if state == 'present':
changed, role_list = create_role(
module, iam, name, path, orig_role_list, orig_prof_list)
elif state == 'absent':
changed, role_list = delete_role(
module, iam, name, orig_role_list, orig_prof_list)
elif state == 'update':
module.fail_json(
changed=False, msg='Role update not currently supported by boto.')
module.exit_json(changed=changed, roles=role_list)
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
    main()

@ -0,0 +1,294 @@
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: iam_cert
short_description: Manage server certificates for use on ELBs and CloudFront
description:
- Allows for the management of server certificates
version_added: "2.0"
options:
name:
description:
- Name of certificate to add, update or remove.
required: true
aliases: []
new_name:
description:
- When present, this will update the name of the cert with the value passed here.
required: false
aliases: []
new_path:
description:
- When present, this will update the path of the cert with the value passed here.
required: false
aliases: []
state:
description:
      - Whether to create or delete the certificate. When present is specified it will attempt to make an update if new_path or new_name is specified.
required: true
default: null
choices: [ "present", "absent" ]
aliases: []
path:
description:
- When creating or updating, specify the desired path of the certificate
required: false
default: "/"
aliases: []
cert_chain:
description:
- The path to the CA certificate chain in PEM encoded format.
required: false
default: null
aliases: []
cert:
description:
- The path to the certificate body in PEM encoded format.
required: false
aliases: []
key:
description:
- The path to the private key of the certificate in PEM encoded format.
dup_ok:
description:
      - By default the module will not upload a certificate that is already uploaded into AWS. If set to True, it will upload the certificate as long as the name is unique.
required: false
default: False
aliases: []
aws_secret_key:
description:
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_secret_key', 'secret_key' ]
aws_access_key:
description:
- AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_access_key', 'access_key' ]
requirements: [ "boto" ]
author: "Jonathan I. Davila (@defionscode)"
extends_documentation_fragment: aws
'''
EXAMPLES = '''
# Basic server certificate upload
tasks:
- name: Upload Certificate
iam_cert:
name: very_ssl
state: present
cert: somecert.pem
key: privcertkey
cert_chain: myverytrustedchain
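# A hedged rename/move sketch, assuming the 'very_ssl' certificate above was
# already uploaded; the new_name and new_path values are illustrative only.
- name: Rename and move an existing certificate
  iam_cert:
    name: very_ssl
    new_name: very_ssl_renamed
    new_path: /production/
    state: present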
'''
import json
import sys
try:
import boto
import boto.iam
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def boto_exception(err):
'''generic error message handler'''
if hasattr(err, 'error_message'):
error = err.error_message
elif hasattr(err, 'message'):
error = err.message
else:
error = '%s: %s' % (Exception, err)
return error
def cert_meta(iam, name):
    # Fetch the certificate once and pull everything from the single response
    # instead of issuing one API call per field.
    cert = iam.get_server_certificate(name).get_server_certificate_result.server_certificate
    meta = cert.server_certificate_metadata
    opath = meta.path
    ocert = cert.certificate_body
    ocert_id = meta.server_certificate_id
    upload_date = meta.upload_date
    exp = meta.expiration
    return opath, ocert, ocert_id, upload_date, exp
def dup_check(module, iam, name, new_name, cert, orig_cert_names, orig_cert_bodies, dup_ok):
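    # Decide whether this request is an update of an existing certificate or a
    # fresh upload: a cert with the same name and body means update; the same
    # name with a different body is an error; and the same body under another
    # name is rejected unless dup_ok is set.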
update=False
if any(ct in orig_cert_names for ct in [name, new_name]):
for i_name in [name, new_name]:
if i_name is None:
continue
if cert is not None:
try:
c_index=orig_cert_names.index(i_name)
except NameError:
continue
else:
if orig_cert_bodies[c_index] == cert:
update=True
break
elif orig_cert_bodies[c_index] != cert:
                        module.fail_json(changed=False, msg='A cert with the name %s already exists and'
                                                            ' has a different certificate body associated'
                                                            ' with it. Certificates cannot have the same name.' % i_name)
else:
update=True
break
elif cert in orig_cert_bodies and not dup_ok:
for crt_name, crt_body in zip(orig_cert_names, orig_cert_bodies):
if crt_body == cert:
module.fail_json(changed=False, msg='This certificate already'
' exists under the name %s' % crt_name)
return update
def cert_action(module, iam, name, cpath, new_name, new_path, state,
cert, key, chain, orig_cert_names, orig_cert_bodies, dup_ok):
if state == 'present':
update = dup_check(module, iam, name, new_name, cert, orig_cert_names,
orig_cert_bodies, dup_ok)
if update:
opath, ocert, ocert_id, upload_date, exp = cert_meta(iam, name)
changed=True
if new_name and new_path:
iam.update_server_cert(name, new_cert_name=new_name, new_path=new_path)
module.exit_json(changed=changed, original_name=name, new_name=new_name,
original_path=opath, new_path=new_path, cert_body=ocert,
upload_date=upload_date, expiration_date=exp)
elif new_name and not new_path:
iam.update_server_cert(name, new_cert_name=new_name)
module.exit_json(changed=changed, original_name=name, new_name=new_name,
cert_path=opath, cert_body=ocert,
upload_date=upload_date, expiration_date=exp)
elif not new_name and new_path:
iam.update_server_cert(name, new_path=new_path)
                module.exit_json(changed=changed, name=name,
original_path=opath, new_path=new_path, cert_body=ocert,
upload_date=upload_date, expiration_date=exp)
else:
changed=False
module.exit_json(changed=changed, name=name, cert_path=opath, cert_body=ocert,
upload_date=upload_date, expiration_date=exp,
msg='No new path or name specified. No changes made')
else:
changed=True
iam.upload_server_cert(name, cert, key, cert_chain=chain, path=cpath)
opath, ocert, ocert_id, upload_date, exp = cert_meta(iam, name)
module.exit_json(changed=changed, name=name, cert_path=opath, cert_body=ocert,
upload_date=upload_date, expiration_date=exp)
elif state == 'absent':
if name in orig_cert_names:
changed=True
iam.delete_server_cert(name)
module.exit_json(changed=changed, deleted_cert=name)
else:
changed=False
            module.exit_json(changed=changed, msg='Certificate with the name %s already absent' % name)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state=dict(
default=None, required=True, choices=['present', 'absent']),
name=dict(default=None, required=False),
cert=dict(default=None, required=False),
key=dict(default=None, required=False),
cert_chain=dict(default=None, required=False),
new_name=dict(default=None, required=False),
path=dict(default='/', required=False),
new_path=dict(default=None, required=False),
        dup_ok=dict(type='bool', default=False, required=False)
)
)
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive=[],
)
if not HAS_BOTO:
module.fail_json(msg="Boto is required for this module")
ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module)
try:
iam = boto.iam.connection.IAMConnection(
aws_access_key_id=aws_access_key,
aws_secret_access_key=aws_secret_key,
)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg=str(e))
state = module.params.get('state')
name = module.params.get('name')
path = module.params.get('path')
new_name = module.params.get('new_name')
new_path = module.params.get('new_path')
cert_chain = module.params.get('cert_chain')
dup_ok = module.params.get('dup_ok')
    if state == 'present':
        cert = open(module.params.get('cert'), 'r').read().rstrip()
        key = open(module.params.get('key'), 'r').read().rstrip()
        if cert_chain is not None:
            cert_chain = open(module.params.get('cert_chain'), 'r').read()
    else:
        key = cert = cert_chain = None
orig_certs = [ctb['server_certificate_name'] for ctb in \
iam.get_all_server_certs().\
list_server_certificates_result.\
server_certificate_metadata_list]
orig_bodies = [iam.get_server_certificate(thing).\
get_server_certificate_result.\
certificate_body \
for thing in orig_certs]
if new_name == name:
new_name = None
if new_path == path:
new_path = None
changed = False
try:
cert_action(module, iam, name, path, new_name, new_path, state,
cert, key, cert_chain, orig_certs, orig_bodies, dup_ok)
except boto.exception.BotoServerError, err:
module.fail_json(changed=changed, msg=str(err), debug=[cert,key])
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()

@ -0,0 +1,325 @@
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: iam_policy
short_description: Manage IAM policies for users, groups, and roles
description:
- Allows uploading or removing IAM policies for IAM users, groups or roles.
version_added: "2.0"
options:
iam_type:
description:
- Type of IAM resource
required: true
default: null
choices: [ "user", "group", "role"]
aliases: []
iam_name:
description:
- Name of IAM resource you wish to target for policy actions. In other words, the user name, group name or role name.
required: true
aliases: []
policy_name:
description:
- The name label for the policy to create or remove.
    required: true
aliases: []
policy_document:
description:
      - The path to the properly formatted JSON policy file
required: false
aliases: []
state:
description:
- Whether to create or delete the IAM policy.
required: true
default: null
choices: [ "present", "absent"]
aliases: []
skip_duplicates:
description:
      - By default the module looks for any policies that match the document you pass in; if there is a match it will not make a new policy object with the same rules. You can override this by specifying false, which allows two policy objects with different names but the same rules.
    required: false
    default: "yes"
aliases: []
aws_secret_key:
description:
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_secret_key', 'secret_key' ]
aws_access_key:
description:
- AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_access_key', 'access_key' ]
requirements: [ "boto" ]
notes:
- 'Currently boto does not support the removal of Managed Policies, the module will not work removing/adding managed policies.'
author: "Jonathan I. Davila (@defionscode)"
extends_documentation_fragment: aws
'''
EXAMPLES = '''
# Create a policy with the name 'Admin' for the group 'administrators'
tasks:
- name: Assign the Admin policy to the administrators group
iam_policy:
iam_type: group
iam_name: administrators
policy_name: Admin
state: present
policy_document: admin_policy.json
# Advanced example, create two new groups and add a READ-ONLY policy to both
# groups.
task:
- name: Create Two Groups, Mario and Luigi
iam:
iam_type: group
name: "{{ item }}"
state: present
with_items:
- Mario
- Luigi
register: new_groups
- name: Apply the READ-ONLY policy to each new group
iam_policy:
iam_type: group
iam_name: "{{ item.created_group.group_name }}"
policy_name: "READ-ONLY"
policy_document: readonlypolicy.json
state: present
with_items: new_groups.results
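# A minimal removal sketch, assuming the READ-ONLY policy above was applied;
# the group names are the illustrative ones from the earlier task.
- name: Remove the READ-ONLY policy from both groups
  iam_policy:
    iam_type: group
    iam_name: "{{ item }}"
    policy_name: "READ-ONLY"
    state: absent
  with_items:
    - Mario
    - Luigi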
'''
import json
import urllib
try:
    import boto
    import boto.iam
    HAS_BOTO = True
except ImportError:
    HAS_BOTO = False
def boto_exception(err):
'''generic error message handler'''
if hasattr(err, 'error_message'):
error = err.error_message
elif hasattr(err, 'message'):
error = err.message
else:
error = '%s: %s' % (Exception, err)
return error
def user_action(module, iam, name, policy_name, skip, pdoc, state):
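    # Upload or remove an inline policy for a user. With skip_duplicates
    # (skip) set, an existing policy whose decoded document matches pdoc
    # short-circuits the upload.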
policy_match = False
changed = False
try:
current_policies = [cp for cp in iam.get_all_user_policies(name).
list_user_policies_result.
policy_names]
for pol in current_policies:
            '''
            urllib is needed here because boto returns url encoded policy
            documents instead of plain JSON
            '''
if urllib.unquote(iam.get_user_policy(name, pol).
get_user_policy_result.policy_document) == pdoc:
policy_match = True
if policy_match:
msg=("The policy document you specified already exists "
"under the name %s." % pol)
if state == 'present' and skip:
if policy_name not in current_policies and not policy_match:
changed = True
iam.put_user_policy(name, policy_name, pdoc)
elif state == 'present' and not skip:
changed = True
iam.put_user_policy(name, policy_name, pdoc)
elif state == 'absent':
try:
iam.delete_user_policy(name, policy_name)
changed = True
except boto.exception.BotoServerError, err:
error_msg = boto_exception(err)
if 'cannot be found.' in error_msg:
changed = False
module.exit_json(changed=changed, msg="%s policy is already absent" % policy_name)
updated_policies = [cp for cp in iam.get_all_user_policies(name).
list_user_policies_result.
policy_names]
except boto.exception.BotoServerError, err:
error_msg = boto_exception(err)
module.fail_json(changed=changed, msg=error_msg)
return changed, name, updated_policies
def role_action(module, iam, name, policy_name, skip, pdoc, state):
policy_match = False
changed = False
try:
current_policies = [cp for cp in iam.list_role_policies(name).
list_role_policies_result.
policy_names]
for pol in current_policies:
if urllib.unquote(iam.get_role_policy(name, pol).
get_role_policy_result.policy_document) == pdoc:
policy_match = True
if policy_match:
msg=("The policy document you specified already exists "
"under the name %s." % pol)
if state == 'present' and skip:
if policy_name not in current_policies and not policy_match:
changed = True
iam.put_role_policy(name, policy_name, pdoc)
elif state == 'present' and not skip:
changed = True
iam.put_role_policy(name, policy_name, pdoc)
elif state == 'absent':
try:
iam.delete_role_policy(name, policy_name)
changed = True
except boto.exception.BotoServerError, err:
error_msg = boto_exception(err)
if 'cannot be found.' in error_msg:
changed = False
module.exit_json(changed=changed,
msg="%s policy is already absent" % policy_name)
updated_policies = [cp for cp in iam.list_role_policies(name).
list_role_policies_result.
policy_names]
except boto.exception.BotoServerError, err:
error_msg = boto_exception(err)
module.fail_json(changed=changed, msg=error_msg)
return changed, name, updated_policies
def group_action(module, iam, name, policy_name, skip, pdoc, state):
policy_match = False
changed = False
msg=''
try:
current_policies = [cp for cp in iam.get_all_group_policies(name).
list_group_policies_result.
policy_names]
for pol in current_policies:
if urllib.unquote(iam.get_group_policy(name, pol).
get_group_policy_result.policy_document) == pdoc:
policy_match = True
if policy_match:
msg=("The policy document you specified already exists "
"under the name %s." % pol)
if state == 'present' and skip:
if policy_name not in current_policies and not policy_match:
changed = True
iam.put_group_policy(name, policy_name, pdoc)
elif state == 'present' and not skip:
changed = True
iam.put_group_policy(name, policy_name, pdoc)
elif state == 'absent':
try:
iam.delete_group_policy(name, policy_name)
changed = True
except boto.exception.BotoServerError, err:
error_msg = boto_exception(err)
if 'cannot be found.' in error_msg:
changed = False
module.exit_json(changed=changed,
msg="%s policy is already absent" % policy_name)
updated_policies = [cp for cp in iam.get_all_group_policies(name).
list_group_policies_result.
policy_names]
except boto.exception.BotoServerError, err:
error_msg = boto_exception(err)
module.fail_json(changed=changed, msg=error_msg)
return changed, name, updated_policies, msg
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
iam_type=dict(
default=None, required=True, choices=['user', 'group', 'role']),
state=dict(
default=None, required=True, choices=['present', 'absent']),
iam_name=dict(default=None, required=False),
policy_name=dict(default=None, required=True),
policy_document=dict(default=None, required=False),
skip_duplicates=dict(type='bool', default=True, required=False)
))
    module = AnsibleModule(
        argument_spec=argument_spec,
    )
    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')
    state = module.params.get('state').lower()
    iam_type = module.params.get('iam_type').lower()
name = module.params.get('iam_name')
policy_name = module.params.get('policy_name')
skip = module.params.get('skip_duplicates')
    if module.params.get('policy_document') is not None:
        with open(module.params.get('policy_document'), 'r') as json_data:
            pdoc = json.dumps(json.load(json_data))
    else:
        pdoc = None
ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module)
try:
iam = boto.iam.connection.IAMConnection(
aws_access_key_id=aws_access_key,
aws_secret_access_key=aws_secret_key,
)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg=str(e))
changed = False
if iam_type == 'user':
changed, user_name, current_policies = user_action(module, iam, name,
policy_name, skip, pdoc,
state)
module.exit_json(changed=changed, user_name=name, policies=current_policies)
elif iam_type == 'role':
changed, role_name, current_policies = role_action(module, iam, name,
policy_name, skip, pdoc,
state)
module.exit_json(changed=changed, role_name=name, policies=current_policies)
elif iam_type == 'group':
changed, group_name, current_policies, msg = group_action(module, iam, name,
policy_name, skip, pdoc,
state)
module.exit_json(changed=changed, group_name=name, policies=current_policies, msg=msg)
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
    main()

@ -102,7 +102,7 @@ options:
required: false
default: null
aliases: []
choices: [ 'license-included', 'bring-your-own-license', 'general-public-license' ]
choices: [ 'license-included', 'bring-your-own-license', 'general-public-license', 'postgresql-license' ]
multi_zone:
description:
- Specifies if this is a Multi-availability-zone deployment. Can not be used in conjunction with zone parameter. Used only when command=create or command=modify.
@ -130,7 +130,7 @@ options:
aliases: []
port:
description:
- Port number that the DB instance uses for connections. Defaults to 3306 for mysql. Must be changed to 1521 for Oracle, 1443 for SQL Server, 5432 for PostgreSQL. Used only when command=create or command=replicate.
- Port number that the DB instance uses for connections. Defaults to 3306 for mysql. Must be changed to 1521 for Oracle, 1433 for SQL Server, 5432 for PostgreSQL. Used only when command=create or command=replicate.
required: false
default: null
aliases: []
@ -241,8 +241,13 @@ options:
default: null
aliases: []
version_added: 1.9
requirements: [ "boto" ]
author: Bruce Pennypacker, Will Thames
requirements:
- "python >= 2.6"
- "boto"
author:
- "Bruce Pennypacker (@bpennypacker)"
- "Will Thames (@willthames)"
'''
# FIXME: the command stuff needs a 'state' like alias to make things consistent -- MPD
@ -622,6 +627,8 @@ def await_resource(conn, resource, status, module):
if resource.name is None:
module.fail_json(msg="Problem with instance %s" % resource.instance)
resource = conn.get_db_instance(resource.name)
if resource is None:
break
return resource
@ -759,7 +766,7 @@ def modify_db_instance(module, conn):
valid_vars = ['apply_immediately', 'backup_retention', 'backup_window',
'db_name', 'engine_version', 'instance_type', 'iops', 'license_model',
'maint_window', 'multi_zone', 'new_instance_name',
'option_group', 'parameter_group' 'password', 'size', 'upgrade']
'option_group', 'parameter_group', 'password', 'size', 'upgrade']
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
@ -961,7 +968,7 @@ def main():
db_name = dict(required=False),
engine_version = dict(required=False),
parameter_group = dict(required=False),
license_model = dict(choices=['license-included', 'bring-your-own-license', 'general-public-license'], required=False),
license_model = dict(choices=['license-included', 'bring-your-own-license', 'general-public-license', 'postgresql-license'], required=False),
multi_zone = dict(type='bool', default=False),
iops = dict(required=False),
security_groups = dict(required=False),

@ -67,7 +67,7 @@ options:
required: true
default: null
aliases: ['aws_region', 'ec2_region']
author: Scott Anderson
author: "Scott Anderson (@tastychutney)"
extends_documentation_fragment: aws
'''
@ -78,7 +78,7 @@ EXAMPLES = '''
name: norwegian_blue
description: 'My Fancy Ex Parrot Group'
engine: 'mysql5.6'
params:
params:
auto_increment_increment: "42K"
# Remove a parameter group
@ -196,8 +196,8 @@ def modify_group(group, params, immediate=False):
if not param.is_modifiable:
raise NotModifiableError('Parameter %s is not modifiable.' % key)
changed[key] = {'old': param.value, 'new': new_value}
changed[key] = {'old': old_value, 'new': new_value}
set_parameter(param, new_value, immediate)
del new_params[key]
@ -260,7 +260,7 @@ def main():
if e.error_code != 'DBParameterGroupNotFound':
module.fail_json(msg = e.error_message)
exists = False
if state == 'absent':
if exists:
conn.delete_parameter_group(group_name)
@ -284,7 +284,7 @@ def main():
marker = next_group.Marker
else:
break
except BotoServerError, e:
module.fail_json(msg = e.error_message)

@ -53,7 +53,7 @@ options:
required: true
default: null
aliases: ['aws_region', 'ec2_region']
author: Scott Anderson
author: "Scott Anderson (@tastychutney)"
extends_documentation_fragment: aws
'''
@ -138,10 +138,14 @@ def main():
else:
if not exists:
new_group = conn.create_db_subnet_group(group_name, desc=group_description, subnet_ids=group_subnets)
changed = True
else:
changed_group = conn.modify_db_subnet_group(group_name, description=group_description, subnet_ids=group_subnets)
# Sort the subnet groups before we compare them
matching_groups[0].subnet_ids.sort()
group_subnets.sort()
if ( (matching_groups[0].name != group_name) or (matching_groups[0].description != group_description) or (matching_groups[0].subnet_ids != group_subnets) ):
changed_group = conn.modify_db_subnet_group(group_name, description=group_description, subnet_ids=group_subnets)
changed = True
except BotoServerError, e:
module.fail_json(msg = e.error_message)

@ -93,7 +93,46 @@ options:
required: false
default: false
version_added: "1.9"
author: Bruce Pennypacker
identifier:
description:
- Weighted and latency-based resource record sets only. An identifier
that differentiates among multiple resource record sets that have the
same combination of DNS name and type.
required: false
default: null
version_added: "2.0"
weight:
description:
- Weighted resource record sets only. Among resource record sets that
have the same combination of DNS name and type, a value that
determines what portion of traffic for the current resource record set
is routed to the associated location.
required: false
default: null
version_added: "2.0"
region:
description:
      - Latency-based resource record sets only. Among resource record sets
        that have the same combination of DNS name and type, a value that
        determines which region this should be associated with for the
        latency-based routing.
required: false
default: null
version_added: "2.0"
health_check:
description:
- Health check to associate with this record
required: false
default: null
version_added: "2.0"
failover:
description:
- Failover resource record sets only. Whether this is the primary or
secondary resource record set.
required: false
default: null
version_added: "2.0"
author: "Bruce Pennypacker (@bpennypacker)"
extends_documentation_fragment: aws
'''
@ -156,6 +195,18 @@ EXAMPLES = '''
alias=True
alias_hosted_zone_id="{{ elb_zone_id }}"
# Use a routing policy to distribute traffic:
- route53:
command: "create"
zone: "foo.com"
record: "www.foo.com"
type: "CNAME"
value: "host1.foo.com"
ttl: 30
# Routing policy
identifier: "host1@www"
weight: 100
health_check: "d994b780-3150-49fd-9205-356abdd42e75"
'''
@ -166,11 +217,21 @@ try:
import boto.ec2
from boto import route53
from boto.route53 import Route53Connection
from boto.route53.record import ResourceRecordSets
from boto.route53.record import Record, ResourceRecordSets
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def get_zone_by_name(conn, module, zone_name, want_private):
"""Finds a zone by name"""
for zone in conn.get_zones():
# only save this zone id if the private status of the zone matches
# the private_zone_in boolean specified in the params
private_zone = module.boolean(zone.config.get('PrivateZone', False))
if private_zone == want_private and zone.name == zone_name:
return zone
return None
def commit(changes, retry_interval):
"""Commit changes, but retry PriorRequestNotComplete errors."""
@ -192,7 +253,7 @@ def main():
command = dict(choices=['get', 'create', 'delete'], required=True),
zone = dict(required=True),
record = dict(required=True),
ttl = dict(required=False, default=3600),
ttl = dict(required=False, type='int', default=3600),
type = dict(choices=['A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'NS'], required=True),
alias = dict(required=False, type='bool'),
alias_hosted_zone_id = dict(required=False),
@ -200,6 +261,11 @@ def main():
overwrite = dict(required=False, type='bool'),
retry_interval = dict(required=False, default=500),
private_zone = dict(required=False, type='bool', default=False),
identifier = dict(required=False),
weight = dict(required=False, type='int'),
region = dict(required=False),
health_check = dict(required=False),
failover = dict(required=False),
)
)
module = AnsibleModule(argument_spec=argument_spec)
@ -217,6 +283,11 @@ def main():
alias_hosted_zone_id_in = module.params.get('alias_hosted_zone_id')
retry_interval_in = module.params.get('retry_interval')
private_zone_in = module.params.get('private_zone')
identifier_in = module.params.get('identifier')
weight_in = module.params.get('weight')
region_in = module.params.get('region')
health_check_in = module.params.get('health_check')
failover_in = module.params.get('failover')
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
@ -224,7 +295,7 @@ def main():
if type(value_in) is str:
if value_in:
value_list = sorted(value_in.split(','))
value_list = sorted([s.strip() for s in value_in.split(',')])
elif type(value_in) is list:
value_list = sorted(value_in)
@ -249,32 +320,34 @@ def main():
except boto.exception.BotoServerError, e:
module.fail_json(msg = e.error_message)
# Get all the existing hosted zones and save their ID's
zones = {}
results = conn.get_all_hosted_zones()
for r53zone in results['ListHostedZonesResponse']['HostedZones']:
# only save this zone id if the private status of the zone matches
# the private_zone_in boolean specified in the params
if module.boolean(r53zone['Config'].get('PrivateZone', False)) == private_zone_in:
zone_id = r53zone['Id'].replace('/hostedzone/', '')
zones[r53zone['Name']] = zone_id
# Find the named zone ID
zone = get_zone_by_name(conn, module, zone_in, private_zone_in)
# Verify that the requested zone is already defined in Route53
if not zone_in in zones:
if zone is None:
errmsg = "Zone %s does not exist in Route53" % zone_in
module.fail_json(msg = errmsg)
record = {}
found_record = False
sets = conn.get_all_rrsets(zones[zone_in])
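    # Build the record set we want up front so it can be compared against the
    # existing sets and handed straight to the change batch later on.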
wanted_rset = Record(name=record_in, type=type_in, ttl=ttl_in,
identifier=identifier_in, weight=weight_in, region=region_in,
health_check=health_check_in, failover=failover_in)
for v in value_list:
if alias_in:
wanted_rset.set_alias(alias_hosted_zone_id_in, v)
else:
wanted_rset.add_value(v)
sets = conn.get_all_rrsets(zone.id, name=record_in, type=type_in, identifier=identifier_in)
for rset in sets:
# Due to a bug in either AWS or Boto, "special" characters are returned as octals, preventing round
# tripping of things like * and @.
decoded_name = rset.name.replace(r'\052', '*')
decoded_name = decoded_name.replace(r'\100', '@')
if rset.type == type_in and decoded_name.lower() == record_in.lower():
if rset.type == type_in and decoded_name.lower() == record_in.lower() and rset.identifier == identifier_in:
found_record = True
record['zone'] = zone_in
record['type'] = rset.type
@ -282,6 +355,11 @@ def main():
record['ttl'] = rset.ttl
record['value'] = ','.join(sorted(rset.resource_records))
record['values'] = sorted(rset.resource_records)
record['identifier'] = rset.identifier
record['weight'] = rset.weight
record['region'] = rset.region
record['failover'] = rset.failover
record['health_check'] = rset.health_check
if rset.alias_dns_name:
record['alias'] = True
record['value'] = rset.alias_dns_name
@ -291,8 +369,9 @@ def main():
record['alias'] = False
record['value'] = ','.join(sorted(rset.resource_records))
record['values'] = sorted(rset.resource_records)
if value_list == sorted(rset.resource_records) and int(record['ttl']) == ttl_in and command_in == 'create':
if command_in == 'create' and rset.to_xml() == wanted_rset.to_xml():
module.exit_json(changed=False)
break
if command_in == 'get':
module.exit_json(changed=False, set=record)
@ -300,26 +379,16 @@ def main():
if command_in == 'delete' and not found_record:
module.exit_json(changed=False)
changes = ResourceRecordSets(conn, zones[zone_in])
if command_in == 'create' and found_record:
if not module.params['overwrite']:
module.fail_json(msg = "Record already exists with different value. Set 'overwrite' to replace it")
else:
change = changes.add_change("DELETE", record_in, type_in, record['ttl'])
for v in record['values']:
if record['alias']:
change.set_alias(record['alias_hosted_zone_id'], v)
else:
change.add_value(v)
changes = ResourceRecordSets(conn, zone.id)
if command_in == 'create' or command_in == 'delete':
change = changes.add_change(command_in.upper(), record_in, type_in, ttl_in)
for v in value_list:
if module.params['alias']:
change.set_alias(alias_hosted_zone_id_in, v)
else:
change.add_value(v)
if command_in == 'create' and found_record:
if not module.params['overwrite']:
module.fail_json(msg = "Record already exists with different value. Set 'overwrite' to replace it")
command = 'UPSERT'
else:
command = command_in.upper()
changes.add_change_record(command, wanted_rset)
try:
result = commit(changes, retry_interval_in)

@ -22,68 +22,97 @@ description:
- This module allows the user to manage S3 buckets and the objects within them. Includes support for creating and deleting both objects and buckets, retrieving objects as files or strings and generating download links. This module has a dependency on python-boto.
version_added: "1.1"
options:
bucket:
aws_access_key:
description:
- Bucket name.
required: true
default: null
aliases: []
object:
description:
- Keyname of the object inside the bucket. Can be used to create "virtual directories", see examples.
- AWS access key id. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
required: false
default: null
aliases: []
version_added: "1.3"
src:
aliases: [ 'ec2_access_key', 'access_key' ]
aws_secret_key:
description:
- The source file path when performing a PUT operation.
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
required: false
default: null
aliases: ['ec2_secret_key', 'secret_key']
bucket:
description: Bucket name.
required: true
default: null
aliases: []
version_added: "1.3"
dest:
description:
- The destination file path when downloading an object/key with a GET operation.
required: false
aliases: []
version_added: "1.3"
overwrite:
encrypt:
description:
- Force overwrite either locally on the filesystem or remotely with the object/key. Used with PUT and GET operations.
- When set for PUT mode, asks for server-side encryption
required: false
default: true
version_added: "1.2"
mode:
description:
    - Switches the module behaviour between put (upload), get (download), geturl (return download url, Ansible 1.3+), getstr (download object as string, 1.3+), create (bucket) and delete (bucket).
required: true
default: null
aliases: []
default: no
expiration:
description:
- Time limit (in seconds) for the URL generated and returned by S3/Walrus when performing a mode=put or mode=geturl operation.
required: false
default: 600
aliases: []
s3_url:
description:
- "S3 URL endpoint for usage with Eucalypus, fakes3, etc. Otherwise assumes AWS"
default: null
aliases: [ S3_URL ]
metadata:
description:
- Metadata for PUT operation, as a dictionary of 'key=value' and 'key=value,key=value'.
required: false
default: null
version_added: "1.6"
mode:
description:
    - Switches the module behaviour between put (upload), get (download), geturl (return download url, Ansible 1.3+), getstr (download object as string, 1.3+), create (bucket), delete (bucket), and delobj (delete object).
required: true
default: null
aliases: []
object:
description:
- Keyname of the object inside the bucket. Can be used to create "virtual directories", see examples.
required: false
default: null
version:
description:
- Version ID of the object inside the bucket. Can be used to get a specific version of a file if versioning is enabled in the target bucket.
required: false
default: null
aliases: []
version_added: "2.0"
overwrite:
description:
- Force overwrite either locally on the filesystem or remotely with the object/key. Used with PUT and GET operations.
required: false
default: true
version_added: "1.2"
region:
description:
- "AWS region to create the bucket in. If not set then the value of the AWS_REGION and EC2_REGION environment variables are checked, followed by the aws_region and ec2_region settings in the Boto config file. If none of those are set the region defaults to the S3 Location: US Standard. Prior to ansible 1.8 this parameter could be specified but had no effect."
required: false
default: null
version_added: "1.8"
author: Lester Wade, Ralph Tice
retries:
description:
- On recoverable failure, how many times to retry before actually failing.
required: false
default: 0
version_added: "2.0"
s3_url:
    description: S3 URL endpoint for usage with Eucalyptus, fakes3, etc. Otherwise assumes AWS
default: null
aliases: [ S3_URL ]
src:
description: The source file path when performing a PUT operation.
required: false
default: null
aliases: []
version_added: "1.3"
requirements: [ "boto" ]
author:
- "Lester Wade (@lwade)"
- "Ralph Tice (@ralph-tice)"
extends_documentation_fragment: aws
'''
@ -94,6 +123,9 @@ EXAMPLES = '''
# Simple GET operation
- s3: bucket=mybucket object=/my/desired/key.txt dest=/usr/local/myfile.txt mode=get
# Get a specific version of an object.
- s3: bucket=mybucket object=/my/desired/key.txt version=48c9ee5131af7a716edc22df9772aa6f dest=/usr/local/myfile.txt mode=get
# PUT/upload with metadata
- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put metadata='Content-Encoding=gzip,Cache-Control=no-cache'
@ -105,14 +137,22 @@ EXAMPLES = '''
# Delete a bucket and all contents
- s3: bucket=mybucket mode=delete
# GET an object but don't download if the file checksums match
- s3: bucket=mybucket object=/my/desired/key.txt dest=/usr/local/myfile.txt mode=get overwrite=different
# Delete an object from a bucket
- s3: bucket=mybucket object=/my/desired/key.txt mode=delobj
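# GET with retries for transient failures; the retry count here is illustrative
- s3: bucket=mybucket object=/my/desired/key.txt dest=/usr/local/myfile.txt mode=get retries=3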
'''
import os
import urlparse
import hashlib
from ssl import SSLError
try:
import boto
import boto.ec2
from boto.s3.connection import Location
from boto.s3.connection import OrdinaryCallingFormat
from boto.s3.connection import S3Connection
@ -120,21 +160,23 @@ try:
except ImportError:
HAS_BOTO = False
def key_check(module, s3, bucket, obj):
def key_check(module, s3, bucket, obj, version=None):
try:
bucket = s3.lookup(bucket)
key_check = bucket.get_key(obj)
key_check = bucket.get_key(obj, version_id=version)
except s3.provider.storage_response_error, e:
module.fail_json(msg= str(e))
if version is not None and e.status == 400: # If a specified version doesn't exist a 400 is returned.
key_check = None
else:
module.fail_json(msg=str(e))
if key_check:
return True
else:
return False
def keysum(module, s3, bucket, obj):
def keysum(module, s3, bucket, obj, version=None):
bucket = s3.lookup(bucket)
key_check = bucket.get_key(obj)
key_check = bucket.get_key(obj, version_id=version)
if not key_check:
return None
md5_remote = key_check.etag[1:-1]
@ -153,7 +195,9 @@ def bucket_check(module, s3, bucket):
else:
return False
def create_bucket(module, s3, bucket, location=Location.DEFAULT):
def create_bucket(module, s3, bucket, location=None):
if location is None:
location = Location.DEFAULT
try:
bucket = s3.create_bucket(bucket, location=location)
except s3.provider.storage_response_error, e:
@ -203,7 +247,8 @@ def path_check(path):
else:
return False
def upload_s3file(module, s3, bucket, obj, src, expiry, metadata):
def upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt):
try:
bucket = s3.lookup(bucket)
key = bucket.new_key(obj)
@ -211,25 +256,34 @@ def upload_s3file(module, s3, bucket, obj, src, expiry, metadata):
for meta_key in metadata.keys():
key.set_metadata(meta_key, metadata[meta_key])
key.set_contents_from_filename(src)
key.set_contents_from_filename(src, encrypt_key=encrypt)
url = key.generate_url(expiry)
module.exit_json(msg="PUT operation complete", url=url, changed=True)
except s3.provider.storage_copy_error, e:
module.fail_json(msg= str(e))
def download_s3file(module, s3, bucket, obj, dest):
try:
bucket = s3.lookup(bucket)
key = bucket.lookup(obj)
key.get_contents_to_filename(dest)
module.exit_json(msg="GET operation complete", changed=True)
except s3.provider.storage_copy_error, e:
module.fail_json(msg= str(e))
def download_s3str(module, s3, bucket, obj):
def download_s3file(module, s3, bucket, obj, dest, retries, version=None):
# retries is the number of loops; range/xrange needs to be one
# more to get that count of loops.
bucket = s3.lookup(bucket)
key = bucket.get_key(obj, version_id=version)
for x in range(0, retries + 1):
try:
key.get_contents_to_filename(dest)
module.exit_json(msg="GET operation complete", changed=True)
except s3.provider.storage_copy_error, e:
module.fail_json(msg= str(e))
except SSLError as e:
# actually fail on last pass through the loop.
if x >= retries:
module.fail_json(msg="s3 download failed; %s" % e)
# otherwise, try again, this may be a transient timeout.
pass
def download_s3str(module, s3, bucket, obj, version=None):
try:
bucket = s3.lookup(bucket)
key = bucket.lookup(obj)
key = bucket.get_key(obj, version_id=version)
contents = key.get_contents_as_string()
module.exit_json(msg="GET operation complete", contents=contents, changed=True)
except s3.provider.storage_copy_error, e:
@ -261,18 +315,29 @@ def is_walrus(s3_url):
else:
return False
def get_md5_digest(local_file):
    # Stream the file in 1 MB chunks so files larger than one chunk hash
    # correctly without being read into memory all at once.
    md5 = hashlib.md5()
    with open(local_file, 'rb') as f:
        for data in iter(lambda: f.read(1024 ** 2), ''):
            md5.update(data)
    return md5.hexdigest()
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
bucket = dict(required=True),
object = dict(),
src = dict(),
dest = dict(default=None),
mode = dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr'], required=True),
encrypt = dict(default=True, type='bool'),
expiry = dict(default=600, aliases=['expiration']),
metadata = dict(type='dict'),
mode = dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj'], required=True),
object = dict(),
version = dict(default=None),
overwrite = dict(aliases=['force'], default='always'),
retries = dict(aliases=['retry'], type='int', default=0),
s3_url = dict(aliases=['S3_URL']),
overwrite = dict(aliases=['force'], default=True, type='bool'),
metadata = dict(type='dict'),
src = dict(),
),
)
module = AnsibleModule(argument_spec=argument_spec)
@ -281,15 +346,30 @@ def main():
module.fail_json(msg='boto required for this module')
bucket = module.params.get('bucket')
obj = module.params.get('object')
src = module.params.get('src')
encrypt = module.params.get('encrypt')
expiry = int(module.params['expiry'])
if module.params.get('dest'):
dest = os.path.expanduser(module.params.get('dest'))
metadata = module.params.get('metadata')
mode = module.params.get('mode')
expiry = int(module.params['expiry'])
s3_url = module.params.get('s3_url')
obj = module.params.get('object')
version = module.params.get('version')
overwrite = module.params.get('overwrite')
metadata = module.params.get('metadata')
retries = module.params.get('retries')
s3_url = module.params.get('s3_url')
src = module.params.get('src')
if overwrite not in ['always', 'never', 'different']:
if module.boolean(overwrite):
overwrite = 'always'
else:
overwrite='never'
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
@ -346,41 +426,44 @@ def main():
module.fail_json(msg="Target bucket cannot be found", failed=True)
# Next, we check to see if the key in the bucket exists. If it exists, it also returns key_matches md5sum check.
keyrtn = key_check(module, s3, bucket, obj)
keyrtn = key_check(module, s3, bucket, obj, version=version)
if keyrtn is False:
module.fail_json(msg="Target key cannot be found", failed=True)
if version is not None:
module.fail_json(msg="Key %s with version id %s does not exist."% (obj, version), failed=True)
else:
module.fail_json(msg="Key %s does not exist."%obj, failed=True)
# If the destination path doesn't exist, no need to md5um etag check, so just download.
pathrtn = path_check(dest)
if pathrtn is False:
download_s3file(module, s3, bucket, obj, dest)
download_s3file(module, s3, bucket, obj, dest, retries, version=version)
# Compare the remote MD5 sum of the object with the local dest md5sum, if it already exists.
if pathrtn is True:
md5_remote = keysum(module, s3, bucket, obj)
md5_local = hashlib.md5(open(dest, 'rb').read()).hexdigest()
md5_remote = keysum(module, s3, bucket, obj, version=version)
md5_local = get_md5_digest(dest)
if md5_local == md5_remote:
sum_matches = True
if overwrite is True:
download_s3file(module, s3, bucket, obj, dest)
if overwrite == 'always':
download_s3file(module, s3, bucket, obj, dest, retries, version=version)
else:
module.exit_json(msg="Local and remote object are identical, ignoring. Use overwrite parameter to force.", changed=False)
module.exit_json(msg="Local and remote object are identical, ignoring. Use overwrite=always parameter to force.", changed=False)
else:
sum_matches = False
if overwrite is True:
download_s3file(module, s3, bucket, obj, dest)
if overwrite in ('always', 'different'):
download_s3file(module, s3, bucket, obj, dest, retries, version=version)
else:
module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force download.")
# Firstly, if key_matches is TRUE and overwrite is not enabled, we EXIT with a helpful message.
if sum_matches is True and overwrite is False:
if sum_matches is True and overwrite == 'never':
module.exit_json(msg="Local and remote object are identical, ignoring. Use overwrite parameter to force.", changed=False)
# At this point explicitly define the overwrite condition.
if sum_matches is True and pathrtn is True and overwrite is True:
download_s3file(module, s3, bucket, obj, dest)
    # If the checksums match, the destination exists and overwrite is 'always', re-download.
if sum_matches is True and pathrtn is True and overwrite == 'always':
download_s3file(module, s3, bucket, obj, dest, retries, version=version)
# if our mode is a PUT operation (upload), go through the procedure as appropriate ...
if mode == 'put':
@ -402,30 +485,47 @@ def main():
# Lets check key state. Does it exist and if it does, compute the etag md5sum.
if bucketrtn is True and keyrtn is True:
md5_remote = keysum(module, s3, bucket, obj)
md5_local = hashlib.md5(open(src, 'rb').read()).hexdigest()
md5_local = get_md5_digest(src)
if md5_local == md5_remote:
sum_matches = True
if overwrite is True:
upload_s3file(module, s3, bucket, obj, src, expiry, metadata)
if overwrite == 'always':
upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt)
else:
get_download_url(module, s3, bucket, obj, expiry, changed=False)
else:
sum_matches = False
if overwrite is True:
upload_s3file(module, s3, bucket, obj, src, expiry, metadata)
if overwrite in ('always', 'different'):
upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt)
else:
module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force upload.")
# If neither exist (based on bucket existence), we can create both.
if bucketrtn is False and pathrtn is True:
create_bucket(module, s3, bucket, location)
upload_s3file(module, s3, bucket, obj, src, expiry, metadata)
upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt)
# If bucket exists but key doesn't, just upload.
if bucketrtn is True and pathrtn is True and keyrtn is False:
upload_s3file(module, s3, bucket, obj, src, expiry, metadata)
upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt)
# Support for deleting an object if we have both params.
# Delete an object from a bucket, not the entire bucket
if mode == 'delobj':
if obj is None:
module.fail_json(msg="object parameter is required", failed=True);
if bucket:
bucketrtn = bucket_check(module, s3, bucket)
if bucketrtn is True:
deletertn = delete_key(module, s3, bucket, obj)
if deletertn is True:
module.exit_json(msg="Object %s deleted from bucket %s." % (obj, bucket), changed=True)
else:
module.fail_json(msg="Bucket does not exist.", changed=False)
else:
module.fail_json(msg="Bucket parameter is required.", failed=True)
# Delete an entire bucket, including all objects in the bucket
if mode == 'delete':
if bucket:
bucketrtn = bucket_check(module, s3, bucket)
@ -484,11 +584,14 @@ def main():
if bucketrtn is False:
module.fail_json(msg="Bucket %s does not exist."%bucket, failed=True)
else:
keyrtn = key_check(module, s3, bucket, obj)
keyrtn = key_check(module, s3, bucket, obj, version=version)
if keyrtn is True:
download_s3str(module, s3, bucket, obj)
download_s3str(module, s3, bucket, obj, version=version)
else:
module.fail_json(msg="Key %s does not exist."%obj, failed=True)
if version is not None:
module.fail_json(msg="Key %s with version id %s does not exist."% (obj, version), failed=True)
else:
module.fail_json(msg="Key %s does not exist."%obj, failed=True)
module.exit_json(failed=False)

@ -19,7 +19,7 @@ DOCUMENTATION = '''
module: azure
short_description: create or terminate a virtual machine in azure
description:
- Creates or terminates azure instances. When created optionally waits for it to be 'running'. This module has a dependency on python-azure >= 0.7.1
- Creates or terminates azure instances. When created optionally waits for it to be 'running'.
version_added: "1.7"
options:
name:
@ -34,12 +34,12 @@ options:
default: null
subscription_id:
description:
- azure subscription id. Overrides the AZURE_SUBSCRIPTION_ID environement variable.
- azure subscription id. Overrides the AZURE_SUBSCRIPTION_ID environment variable.
required: false
default: null
management_cert_path:
description:
- path to an azure management certificate associated with the subscription id. Overrides the AZURE_CERT_PATH environement variable.
- path to an azure management certificate associated with the subscription id. Overrides the AZURE_CERT_PATH environment variable.
required: false
default: null
storage_account:
@ -53,7 +53,7 @@ options:
default: null
role_size:
description:
- azure role size for the new virtual machine (e.g., Small, ExtraLarge, A6)
    - azure role size for the new virtual machine (e.g., Small, ExtraLarge, A6). Note that G and DS series instances are not available in all regions (locations). Make sure the size and type of instance you select is available in your chosen location.
required: false
default: Small
endpoints:
@ -110,9 +110,39 @@ options:
required: false
default: 'present'
aliases: []
requirements: [ "azure" ]
author: John Whitbeck
reset_pass_atlogon:
description:
- Reset the admin password on first logon for windows hosts
required: false
default: "no"
version_added: "2.0"
choices: [ "yes", "no" ]
auto_updates:
description:
- Enable Auto Updates on Windows Machines
required: false
version_added: "2.0"
default: "no"
choices: [ "yes", "no" ]
enable_winrm:
description:
- Enable winrm on Windows Machines
required: false
version_added: "2.0"
default: "yes"
choices: [ "yes", "no" ]
os_type:
description:
    - The type of the OS that is getting provisioned
required: false
version_added: "2.0"
default: "linux"
choices: [ "windows", "linux" ]
requirements:
- "python >= 2.6"
- "azure >= 0.7.1"
author: "John Whitbeck (@jwhitbeck)"
'''
EXAMPLES = '''
@ -136,14 +166,37 @@ EXAMPLES = '''
module: azure
name: my-virtual-machine
state: absent
#Create windows machine
- hosts: all
connection: local
tasks:
- local_action:
module: azure
name: "ben-Winows-23"
hostname: "win123"
os_type: windows
enable_winrm: yes
subscription_id: "{{ azure_sub_id }}"
management_cert_path: "{{ azure_cert_path }}"
role_size: Small
image: 'bd507d3a70934695bc2128e3e5a255ba__RightImage-Windows-2012-x64-v13.5'
location: 'East Asia'
password: "xxx"
storage_account: benooytes
user: admin
wait: yes
virtual_network_name: "{{ vnet_name }}"
'''
import base64
import datetime
import os
import sys
import time
from urlparse import urlparse
from ansible.module_utils.facts import * # TimeoutError
AZURE_LOCATIONS = ['South Central US',
'Central US',
@ -182,9 +235,17 @@ AZURE_ROLE_SIZES = ['ExtraSmall',
'Standard_D12',
'Standard_D13',
'Standard_D14',
'Standard_DS1',
'Standard_DS2',
'Standard_DS3',
'Standard_DS4',
'Standard_DS11',
'Standard_DS12',
'Standard_DS13',
'Standard_DS14',
'Standard_G1',
'Standard_G2',
'Sandard_G3',
'Standard_G3',
'Standard_G4',
'Standard_G5']
@ -194,10 +255,10 @@ try:
from azure import WindowsAzureError, WindowsAzureMissingResourceError
from azure.servicemanagement import (ServiceManagementService, OSVirtualHardDisk, SSH, PublicKeys,
PublicKey, LinuxConfigurationSet, ConfigurationSetInputEndpoints,
ConfigurationSetInputEndpoint)
ConfigurationSetInputEndpoint, Listener, WindowsConfigurationSet)
HAS_AZURE = True
except ImportError:
print "failed=True msg='azure required for this module'"
sys.exit(1)
HAS_AZURE = False
from distutils.version import LooseVersion
from types import MethodType
@ -215,6 +276,23 @@ def _wait_for_completion(azure, promise, wait_timeout, msg):
raise WindowsAzureError('Timed out waiting for async operation ' + msg + ' "' + str(promise.request_id) + '" to complete.')
def _delete_disks_when_detached(azure, wait_timeout, disk_names):
def _handle_timeout(signum, frame):
raise TimeoutError("Timeout reached while waiting for disks to become detached.")
signal.signal(signal.SIGALRM, _handle_timeout)
signal.alarm(wait_timeout)
try:
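# Poll the remaining disks, deleting each one as soon as Azure reports it
# detached; the SIGALRM handler above aborts the loop at wait_timeout.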
while len(disk_names) > 0:
for disk_name in disk_names:
disk = azure.get_disk(disk_name)
if disk.attached_to is None:
azure.delete_disk(disk.name, True)
disk_names.remove(disk_name)
except WindowsAzureError, e:
module.fail_json(msg="failed to get or delete disk, error was: %s" % (disk_name, str(e)))
finally:
signal.alarm(0)
def get_ssh_certificate_tokens(module, ssh_cert_path):
"""
@ -242,9 +320,10 @@ def create_virtual_machine(module, azure):
azure: authenticated azure ServiceManagementService object
Returns:
True if a new virtual machine was created, false otherwise
True if a new virtual machine and/or cloud service was created, false otherwise
"""
name = module.params.get('name')
os_type = module.params.get('os_type')
hostname = module.params.get('hostname') or name + ".cloudapp.net"
endpoints = module.params.get('endpoints').split(',')
ssh_cert_path = module.params.get('ssh_cert_path')
@ -258,22 +337,39 @@ def create_virtual_machine(module, azure):
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
changed = False
# Check if a deployment with the same name already exists
cloud_service_name_available = azure.check_hosted_service_name_availability(name)
if not cloud_service_name_available.result:
changed = False
else:
changed = True
# Create cloud service if necessary
if cloud_service_name_available.result:
# cloud service does not exist; create it
try:
result = azure.create_hosted_service(service_name=name, label=name, location=location)
_wait_for_completion(azure, result, wait_timeout, "create_hosted_service")
except WindowsAzureError as e:
module.fail_json(msg="failed to create the new service name, it already exists: %s" % str(e))
changed = True
except WindowsAzureError, e:
module.fail_json(msg="failed to create the new service, error was: %s" % str(e))
# Create linux configuration
disable_ssh_password_authentication = not password
linux_config = LinuxConfigurationSet(hostname, user, password, disable_ssh_password_authentication)
try:
# check to see if a vm with this name exists; if so, do nothing
azure.get_role(name, name, name)
except WindowsAzureMissingResourceError:
# vm does not exist; create it
if os_type == 'linux':
# Create linux configuration
disable_ssh_password_authentication = not password
vm_config = LinuxConfigurationSet(hostname, user, password, disable_ssh_password_authentication)
else:
# Create Windows config
vm_config = WindowsConfigurationSet(hostname, password, module.params.get('reset_pass_atlogon'),\
module.params.get('auto_updates'), None, user)
vm_config.domain_join = None
if module.params.get('enable_winrm'):
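# Attach an HTTP WinRM listener so the new host can be managed over WinRM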
listener = Listener('Http')
vm_config.win_rm.listeners.listeners.append(listener)
else:
vm_config.win_rm = None
# Add ssh certificates if specified
if ssh_cert_path:
@ -288,7 +384,7 @@ def create_virtual_machine(module, azure):
authorized_keys_path = u'/home/%s/.ssh/authorized_keys' % user
ssh_config.public_keys.public_keys.append(PublicKey(path=authorized_keys_path, fingerprint=fingerprint))
# Append ssh config to the vm config
linux_config.ssh = ssh_config
vm_config.ssh = ssh_config
# Create network configuration
network_config = ConfigurationSetInputEndpoints()
@ -315,21 +411,21 @@ def create_virtual_machine(module, azure):
deployment_slot='production',
label=name,
role_name=name,
system_config=linux_config,
system_config=vm_config,
network_config=network_config,
os_virtual_hard_disk=os_hd,
role_size=role_size,
role_type='PersistentVMRole',
virtual_network_name=virtual_network_name)
_wait_for_completion(azure, result, wait_timeout, "create_virtual_machine_deployment")
except WindowsAzureError as e:
changed = True
except WindowsAzureError, e:
module.fail_json(msg="failed to create the new virtual machine, error was: %s" % str(e))
try:
deployment = azure.get_deployment_by_name(service_name=name, deployment_name=name)
return (changed, urlparse(deployment.url).hostname, deployment)
except WindowsAzureError as e:
except WindowsAzureError, e:
module.fail_json(msg="failed to lookup the deployment information for %s, error was: %s" % (name, str(e)))
@ -340,8 +436,6 @@ def terminate_virtual_machine(module, azure):
module : AnsibleModule object
azure: authenticated azure ServiceManagementService object
Not yet supported: handle deletion of attached data disks.
Returns:
True if a new virtual machine was deleted, false otherwise
"""
@ -359,9 +453,9 @@ def terminate_virtual_machine(module, azure):
disk_names = []
try:
deployment = azure.get_deployment_by_name(service_name=name, deployment_name=name)
except WindowsAzureMissingResourceError as e:
except WindowsAzureMissingResourceError, e:
pass # no such deployment or service
except WindowsAzureError as e:
except WindowsAzureError, e:
module.fail_json(msg="failed to find the deployment, error was: %s" % str(e))
# Delete deployment
@ -374,17 +468,28 @@ def terminate_virtual_machine(module, azure):
role_props = azure.get_role(name, deployment.name, role.role_name)
if role_props.os_virtual_hard_disk.disk_name not in disk_names:
disk_names.append(role_props.os_virtual_hard_disk.disk_name)
except WindowsAzureError, e:
module.fail_json(msg="failed to get the role %s, error was: %s" % (role.role_name, str(e)))
try:
result = azure.delete_deployment(name, deployment.name)
_wait_for_completion(azure, result, wait_timeout, "delete_deployment")
except WindowsAzureError, e:
module.fail_json(msg="failed to delete the deployment %s, error was: %s" % (deployment.name, str(e)))
for disk_name in disk_names:
azure.delete_disk(disk_name, True)
# It's unclear when disks associated with terminated deployment get detached.
# Thus, until the wait_timeout is reached, we continue to delete disks as they
# become detached by polling the list of remaining disks and examining the state.
try:
_delete_disks_when_detached(azure, wait_timeout, disk_names)
except (WindowsAzureError, TimeoutError), e:
module.fail_json(msg=str(e))
try:
# Now that the vm is deleted, remove the cloud service
result = azure.delete_hosted_service(service_name=name)
_wait_for_completion(azure, result, wait_timeout, "delete_hosted_service")
except WindowsAzureError as e:
except WindowsAzureError, e:
module.fail_json(msg="failed to delete the service %s, error was: %s" % (name, str(e)))
public_dns_name = urlparse(deployment.url).hostname
@ -392,7 +497,7 @@ def terminate_virtual_machine(module, azure):
def get_azure_creds(module):
# Check modul args for credentials, then check environment vars
# Check module args for credentials, then check environment vars
subscription_id = module.params.get('subscription_id')
if not subscription_id:
subscription_id = os.environ.get('AZURE_SUBSCRIPTION_ID', None)
@ -414,6 +519,7 @@ def main():
ssh_cert_path=dict(),
name=dict(),
hostname=dict(),
os_type=dict(default='linux', choices=['linux', 'windows']),
location=dict(choices=AZURE_LOCATIONS),
role_size=dict(choices=AZURE_ROLE_SIZES),
subscription_id=dict(no_log=True),
@ -427,9 +533,14 @@ def main():
state=dict(default='present'),
wait=dict(type='bool', default=False),
wait_timeout=dict(default=600),
wait_timeout_redirects=dict(default=300)
wait_timeout_redirects=dict(default=300),
reset_pass_atlogon=dict(type='bool', default=False),
auto_updates=dict(type='bool', default=False),
enable_winrm=dict(type='bool', default=True),
)
)
if not HAS_AZURE:
module.fail_json(msg='azure python module required for this module')
# create azure ServiceManagementService object
subscription_id, management_cert_path = get_azure_creds(module)
@ -443,7 +554,7 @@ def main():
cloud_service_raw = None
if module.params.get('state') == 'absent':
(changed, public_dns_name, deployment) = terminate_virtual_machine(module, azure)
elif module.params.get('state') == 'present':
# Changed is always set to true when provisioning new instances
if not module.params.get('name'):
@ -486,7 +597,7 @@ class Wrapper(object):
while wait_timeout > time.time():
try:
return f()
except WindowsAzureError as e:
except WindowsAzureError, e:
if not str(e).lower().find("temporary redirect") == -1:
time.sleep(5)
pass
@ -496,5 +607,5 @@ class Wrapper(object):
# import module snippets
from ansible.module_utils.basic import *
main()
if __name__ == '__main__':
main()

@ -22,6 +22,7 @@ short_description: Create/delete a droplet/SSH_key in DigitalOcean
description:
- Create/delete a droplet in DigitalOcean and optionally wait for it to be 'running', or deploy an SSH key.
version_added: "1.3"
author: "Vincent Viallet (@zbal)"
options:
command:
description:
@ -59,7 +60,7 @@ options:
- This is the slug of the region you would like your server to be created in.
ssh_key_ids:
description:
- Optional, array of of ssh_key_ids that you would like to be added to the server.
- Optional, array of SSH key (numeric) IDs that you would like to be added to the server.
virtio:
description:
- "Bool, turn on virtio driver in droplet for improved network and storage I/O."
@ -99,8 +100,11 @@ options:
notes:
- Two environment variables can be used, DO_API_KEY and DO_API_TOKEN. They both refer to the v2 token.
- Version 2 of DigitalOcean API is used.
requirements: [ dopy ]
- As of Ansible 2.0, Version 2 of the DigitalOcean API is used.
- As of Ansible 2.0, the above parameters were changed significantly. If you are running 1.9.x or earlier, please use C(ansible-doc digital_ocean) to view the correct parameters for your version. Dedicated web docs will be available in the near future for the stable branch.
requirements:
- "python >= 2.6"
- dopy
'''
@ -154,7 +158,7 @@ EXAMPLES = '''
- digital_ocean: >
state=present
ssh_key_ids=[id1,id2]
ssh_key_ids=123,456
name=mydroplet
api_token=XXX
size_id=2gb
@ -162,20 +166,18 @@ EXAMPLES = '''
image_id=fedora-19-x64
'''
import sys
import os
import time
from distutils.version import LooseVersion
HAS_DOPY = True
try:
import dopy
from dopy.manager import DoError, DoManager
except ImportError, e:
print "failed=True msg='dopy >= 0.3.2 required for this module'"
sys.exit(1)
if dopy.__version__ < '0.3.2':
print "failed=True msg='dopy >= 0.3.2 required for this module'"
sys.exit(1)
if LooseVersion(dopy.__version__) < LooseVersion('0.3.2'):
HAS_DOPY = False
except ImportError:
HAS_DOPY = False
class TimeoutError(DoError):
def __init__(self, msg, id):
@ -398,7 +400,7 @@ def main():
size_id = dict(),
image_id = dict(),
region_id = dict(),
ssh_key_ids = dict(default=''),
ssh_key_ids = dict(type='list'),
virtio = dict(type='bool', default='yes'),
private_networking = dict(type='bool', default='no'),
backups_enabled = dict(type='bool', default='no'),
@ -421,6 +423,8 @@ def main():
['id', 'name'],
),
)
if not HAS_DOPY:
module.fail_json(msg='dopy >= 0.3.2 required for this module')
try:
core(module)
@ -432,4 +436,5 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
main()
if __name__ == '__main__':
main()

@ -22,6 +22,7 @@ short_description: Create/delete a DNS record in DigitalOcean
description:
- Create/delete a DNS record in DigitalOcean.
version_added: "1.6"
author: "Michael Gregson (@mgregson)"
options:
state:
description:
@ -47,6 +48,10 @@ options:
notes:
- Two environment variables can be used, DO_CLIENT_ID and DO_API_KEY.
- Version 1 of DigitalOcean API is used.
requirements:
- "python >= 2.6"
- dopy
'''
@ -74,15 +79,14 @@ EXAMPLES = '''
ip={{ test_droplet.droplet.ip_address }}
'''
import sys
import os
import time
try:
from dopy.manager import DoError, DoManager
HAS_DOPY = True
except ImportError as e:
print "failed=True msg='dopy required for this module'"
sys.exit(1)
HAS_DOPY = False
class TimeoutError(DoError):
def __init__(self, msg, id):
@ -229,6 +233,8 @@ def main():
['id', 'name'],
),
)
if not HAS_DOPY:
module.fail_json(msg='dopy required for this module')
try:
core(module)
@ -239,5 +245,5 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
main()
if __name__ == '__main__':
main()

@ -22,6 +22,7 @@ short_description: Create/delete an SSH key in DigitalOcean
description:
- Create/delete an SSH key.
version_added: "1.6"
author: "Michael Gregson (@mgregson)"
options:
state:
description:
@ -47,6 +48,9 @@ options:
notes:
- Two environment variables can be used, DO_CLIENT_ID and DO_API_KEY.
- Version 1 of DigitalOcean API is used.
requirements:
- "python >= 2.6"
- dopy
'''
@ -64,15 +68,14 @@ EXAMPLES = '''
'''
import sys
import os
import time
try:
from dopy.manager import DoError, DoManager
except ImportError as e:
print "failed=True msg='dopy required for this module'"
sys.exit(1)
HAS_DOPY = True
except ImportError:
HAS_DOPY = False
class TimeoutError(DoError):
def __init__(self, msg, id):
@ -165,6 +168,8 @@ def main():
['id', 'name'],
),
)
if not HAS_DOPY:
module.fail_json(msg='dopy required for this module')
try:
core(module)
@ -175,5 +180,5 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
main()
if __name__ == '__main__':
main()

@ -59,10 +59,10 @@ options:
version_added: "1.5"
ports:
description:
- List containing private to public port mapping specification. Use docker
- 'CLI-style syntax: C(8000), C(9000:8000), or C(0.0.0.0:9000:8000)'
- where 8000 is a container port, 9000 is a host port, and 0.0.0.0 is
- a host interface.
- "List containing private to public port mapping specification.
Use docker 'CLI-style syntax: C(8000), C(9000:8000), or C(0.0.0.0:9000:8000)'
where 8000 is a container port, 9000 is a host port, and 0.0.0.0 is - a host interface.
The container ports need to be exposed either in the Dockerfile or via the C(expose) option."
default: null
version_added: "1.5"
expose:
@ -92,6 +92,23 @@ options:
- 'alias. Use docker CLI-style syntax: C(redis:myredis).'
default: null
version_added: "1.5"
log_driver:
description:
- You can specify a different logging driver for the container than for the daemon.
"json-file" is the default logging driver for Docker; it writes JSON messages to file,
and the docker logs command is available only for this logging driver.
"none" disables any logging for the container; docker logs won't be available with this driver.
"syslog" is the syslog logging driver for Docker; it writes log messages to syslog,
and the docker logs command is not available for this logging driver.
If not defined explicitly, the Docker daemon's default ("json-file") will apply.
Requires docker >= 1.6.0. See the example after this documentation section.
required: false
default: json-file
choices:
- json-file
- none
- syslog
version_added: "2.0"
memory_limit:
description:
- RAM allocated to the container as a number of bytes or as a human-readable
@ -246,6 +263,9 @@ options:
retries.
default: 0
version_added: "1.9"
extra_hosts:
description:
- Dict of custom host-to-IP mappings to be defined in the container. See the example after this documentation section.
required: false
default: null
version_added: "2.0"
insecure_registry:
description:
- Use insecure private registry by HTTP instead of HTTPS. Needed for
@ -253,8 +273,15 @@ options:
default: false
version_added: "1.9"
author: Cove Schneider, Joshua Conner, Pavel Antonov, Ash Wilson
requirements: [ "docker-py >= 0.3.0", "docker >= 0.10.0" ]
author:
- "Cove Schneider (@cove)"
- "Joshua Conner (@joshuaconner)"
- "Pavel Antonov (@softzilla)"
- "Ash Wilson (@smashwilson)"
requirements:
- "python >= 2.6"
- "docker-py >= 0.3.0"
- "The docker server >= 0.10.0"
'''
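The ports, log_driver, and extra_hosts options above combine as in the following minimal sketch (image, container name, and addresses are illustrative):

- name: run a web container with syslog logging and a custom hosts entry
  docker:
    image: ubuntu:14.04
    name: web
    ports:
    - "0.0.0.0:9000:8000"
    log_driver: syslog
    extra_hosts:
      db.internal: 192.168.1.10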
EXAMPLES = '''
@ -370,6 +397,13 @@ if HAS_DOCKER_PY:
from docker.errors import APIError as DockerAPIError
except ImportError:
from docker.client import APIError as DockerAPIError
try:
# docker-py 1.2+
import docker.constants
DEFAULT_DOCKER_API_VERSION = docker.constants.DEFAULT_DOCKER_API_VERSION
except (ImportError, AttributeError):
# docker-py less than 1.2
DEFAULT_DOCKER_API_VERSION = docker.client.DEFAULT_DOCKER_API_VERSION
def _human_to_bytes(number):
@ -386,8 +420,7 @@ def _human_to_bytes(number):
return int(number[:-len(each)]) * (1024 ** i)
i = i + 1
print "failed=True msg='Could not convert %s to integer'" % (number)
sys.exit(1)
raise ValueError('Could not convert %s to integer' % (number,))
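# For reference: _human_to_bytes('256MB') is expected to yield 256 * 1024**2
# (assuming the usual B/KB/MB/... suffix list); unparseable input now raises
# ValueError instead of exiting the process.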
def _ansible_facts(container_list):
@ -492,7 +525,9 @@ class DockerManager(object):
'dns': ((0, 3, 0), '1.10'),
'volumes_from': ((0, 3, 0), '1.10'),
'restart_policy': ((0, 5, 0), '1.14'),
'extra_hosts': ((0, 7, 0), '1.3.1'),
'pid': ((1, 0, 0), '1.17'),
'log_driver': ((1, 2, 0), '1.18'),
# Clientside only
'insecure_registry': ((0, 5, 0), '0.0')
}
@ -528,7 +563,7 @@ class DockerManager(object):
self.lxc_conf = []
options = self.module.params.get('lxc_conf')
for option in options:
parts = option.split(':')
parts = option.split(':', 1)
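# split on the first ':' only, so option values may themselves contain colons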
self.lxc_conf.append({"Key": parts[0], "Value": parts[1]})
self.exposed_ports = None
@ -560,8 +595,6 @@ class DockerManager(object):
docker_url = 'unix://var/run/docker.sock'
docker_api_version = module.params.get('docker_api_version')
if not docker_api_version:
docker_api_version=docker.client.DEFAULT_DOCKER_API_VERSION
tls_client_cert = module.params.get('tls_client_cert', None)
if not tls_client_cert and env_cert_path:
@ -630,7 +663,7 @@ class DockerManager(object):
self.docker_py_versioninfo = get_docker_py_versioninfo()
def _check_capabilties(self):
def _check_capabilities(self):
"""
Create a list of available capabilities
"""
@ -651,7 +684,7 @@ class DockerManager(object):
we lack the capability.
"""
if not self._capabilities:
self._check_capabilties()
self._check_capabilities()
if capability in self._capabilities:
return True
@ -728,7 +761,7 @@ class DockerManager(object):
elif p_len == 3:
# Bind `container_port` of the container to port `parts[1]` on
# IP `parts[0]` of the host machine. If `parts[1]` empty bind
# to a dynamically allocacted port of IP `parts[0]`.
# to a dynamically allocated port of IP `parts[0]`.
bind = (parts[0], int(parts[1])) if parts[1] else (parts[0],)
if container_port in binds:
@ -811,7 +844,7 @@ class DockerManager(object):
for image in self.client.images(name=image):
if resource in image.get('RepoTags', []):
return image['RepoTags']
return None
return []
def get_inspect_containers(self, containers):
inspect = []
@ -899,7 +932,11 @@ class DockerManager(object):
# MEM_LIMIT
expected_mem = _human_to_bytes(self.module.params.get('memory_limit'))
try:
expected_mem = _human_to_bytes(self.module.params.get('memory_limit'))
except ValueError as e:
self.module.fail_json(msg=str(e))
actual_mem = container['Config']['Memory']
if expected_mem and actual_mem != expected_mem:
@ -1095,6 +1132,16 @@ class DockerManager(object):
self.reload_reasons.append('volumes_from ({0} => {1})'.format(actual_volumes_from, expected_volumes_from))
differing.append(container)
# LOG_DRIVER
if self.ensure_capability('log_driver', False):
expected_log_driver = self.module.params.get('log_driver') or 'json-file'
actual_log_driver = container['HostConfig']['LogConfig']['Type']
if actual_log_driver != expected_log_driver:
self.reload_reasons.append('log_driver ({0} => {1})'.format(actual_log_driver, expected_log_driver))
differing.append(container)
continue
return differing
def get_deployed_containers(self):
@ -1118,17 +1165,20 @@ class DockerManager(object):
else:
repo_tags = [normalize_image(self.module.params.get('image'))]
for i in self.client.containers(all=True):
for container in self.client.containers(all=True):
details = None
if name:
matches = name in i.get('Names', [])
name_list = container.get('Names')
if name_list is None:
name_list = []
matches = name in name_list
else:
details = self.client.inspect_container(i['Id'])
details = self.client.inspect_container(container['Id'])
details = _docker_id_quirk(details)
running_image = normalize_image(details['Config']['Image'])
running_command = i['Command'].strip()
running_command = container['Command'].strip()
image_matches = running_image in repo_tags
@ -1140,7 +1190,7 @@ class DockerManager(object):
if matches:
if not details:
details = self.client.inspect_container(i['Id'])
details = self.client.inspect_container(container['Id'])
details = _docker_id_quirk(details)
deployed.append(details)
@ -1188,39 +1238,7 @@ class DockerManager(object):
except Exception as e:
self.module.fail_json(msg="Failed to pull the specified image: %s" % resource, error=repr(e))
def create_containers(self, count=1):
params = {'image': self.module.params.get('image'),
'command': self.module.params.get('command'),
'ports': self.exposed_ports,
'volumes': self.volumes,
'mem_limit': _human_to_bytes(self.module.params.get('memory_limit')),
'environment': self.env,
'hostname': self.module.params.get('hostname'),
'domainname': self.module.params.get('domainname'),
'detach': self.module.params.get('detach'),
'name': self.module.params.get('name'),
'stdin_open': self.module.params.get('stdin_open'),
'tty': self.module.params.get('tty'),
}
def do_create(count, params):
results = []
for _ in range(count):
result = self.client.create_container(**params)
self.increment_counter('created')
results.append(result)
return results
try:
containers = do_create(count, params)
except:
self.pull_image()
containers = do_create(count, params)
return containers
def start_containers(self, containers):
def create_host_config(self):
params = {
'lxc_conf': self.lxc_conf,
'binds': self.binds,
@ -1233,7 +1251,7 @@ class DockerManager(object):
optionals = {}
for optional_param in ('dns', 'volumes_from', 'restart_policy',
'restart_policy_retry', 'pid'):
'restart_policy_retry', 'pid', 'extra_hosts', 'log_driver'):
optionals[optional_param] = self.module.params.get(optional_param)
if optionals['dns'] is not None:
@ -1254,8 +1272,59 @@ class DockerManager(object):
self.ensure_capability('pid')
params['pid_mode'] = optionals['pid']
if optionals['extra_hosts'] is not None:
self.ensure_capability('extra_hosts')
params['extra_hosts'] = optionals['extra_hosts']
if optionals['log_driver'] is not None:
self.ensure_capability('log_driver')
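# docker-py's LogConfig requires a type at construction time; start from the
# JSON default and then override it with the requested driver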
log_config = docker.utils.LogConfig(type=docker.utils.LogConfig.types.JSON)
log_config.type = optionals['log_driver']
params['log_config'] = log_config
return docker.utils.create_host_config(**params)
def create_containers(self, count=1):
try:
mem_limit = _human_to_bytes(self.module.params.get('memory_limit'))
except ValueError as e:
self.module.fail_json(msg=str(e))
params = {'image': self.module.params.get('image'),
'command': self.module.params.get('command'),
'ports': self.exposed_ports,
'volumes': self.volumes,
'mem_limit': mem_limit,
'environment': self.env,
'hostname': self.module.params.get('hostname'),
'domainname': self.module.params.get('domainname'),
'detach': self.module.params.get('detach'),
'name': self.module.params.get('name'),
'stdin_open': self.module.params.get('stdin_open'),
'tty': self.module.params.get('tty'),
'host_config': self.create_host_config(),
}
def do_create(count, params):
results = []
for _ in range(count):
result = self.client.create_container(**params)
self.increment_counter('created')
results.append(result)
return results
try:
containers = do_create(count, params)
except:
self.pull_image()
containers = do_create(count, params)
return containers
def start_containers(self, containers):
for i in containers:
self.client.start(i['Id'], **params)
self.client.start(i)
self.increment_counter('started')
def stop_containers(self, containers):
@ -1425,7 +1494,7 @@ def main():
tls_client_key = dict(required=False, default=None, type='str'),
tls_ca_cert = dict(required=False, default=None, type='str'),
tls_hostname = dict(required=False, type='str', default=None),
docker_api_version = dict(),
docker_api_version = dict(required=False, default=DEFAULT_DOCKER_API_VERSION, type='str'),
username = dict(default=None),
password = dict(),
email = dict(),
@ -1438,6 +1507,7 @@ def main():
state = dict(default='started', choices=['present', 'started', 'reloaded', 'restarted', 'stopped', 'killed', 'absent', 'running']),
restart_policy = dict(default=None, choices=['always', 'on-failure', 'no']),
restart_policy_retry = dict(default=0, type='int'),
extra_hosts = dict(type='dict'),
debug = dict(default=False, type='bool'),
privileged = dict(default=False, type='bool'),
stdin_open = dict(default=False, type='bool'),
@ -1447,6 +1517,7 @@ def main():
net = dict(default=None),
pid = dict(default=None),
insecure_registry = dict(default=False, type='bool'),
log_driver = dict(default=None, choices=['json-file', 'none', 'syslog']),
),
required_together = (
['tls_client_cert', 'tls_client_key'],

@ -23,8 +23,7 @@
DOCUMENTATION = '''
---
module: docker_image
deprecated: "functions are being rolled into the 'docker' module"
author: Pavel Antonov
author: "Pavel Antonov (@softzilla)"
version_added: "1.5"
short_description: manage docker images
description:
@ -36,6 +35,12 @@ options:
required: false
default: null
aliases: []
dockerfile:
description:
- Dockerfile to use
required: false
default: Dockerfile
version_added: "2.0"
name:
description:
- Image name to work with
@ -60,6 +65,12 @@ options:
required: false
default: unix://var/run/docker.sock
aliases: []
docker_api_version:
description:
- Remote API version to use. This defaults to the current default as
specified by docker-py.
default: docker-py default remote API version
version_added: "2.0"
state:
description:
- Set the state of the image
@ -73,7 +84,10 @@ options:
required: false
default: 600
aliases: []
requirements: [ "docker-py" ]
requirements:
- "python >= 2.6"
- "docker-py"
- "requests"
'''
EXAMPLES = '''
@ -103,32 +117,54 @@ Remove image from local docker storage:
'''
import re
from urlparse import urlparse
try:
import sys
import re
import json
import docker.client
except ImportError:
import simplejson as json
try:
from requests.exceptions import *
from urlparse import urlparse
except ImportError, e:
print "failed=True msg='failed to import python module: %s'" % e
sys.exit(1)
HAS_REQUESTS = True
except ImportError:
HAS_REQUESTS = False
try:
from docker.errors import APIError as DockerAPIError
import docker.client
HAS_DOCKER_CLIENT = True
except ImportError:
from docker.client import APIError as DockerAPIError
HAS_DOCKER_CLIENT = False
if HAS_DOCKER_CLIENT:
try:
from docker.errors import APIError as DockerAPIError
except ImportError:
from docker.client import APIError as DockerAPIError
try:
# docker-py 1.2+
import docker.constants
DEFAULT_DOCKER_API_VERSION = docker.constants.DEFAULT_DOCKER_API_VERSION
except (ImportError, AttributeError):
# docker-py less than 1.2
DEFAULT_DOCKER_API_VERSION = docker.client.DEFAULT_DOCKER_API_VERSION
class DockerImageManager:
def __init__(self, module):
self.module = module
self.path = self.module.params.get('path')
self.dockerfile = self.module.params.get('dockerfile')
self.name = self.module.params.get('name')
self.tag = self.module.params.get('tag')
self.nocache = self.module.params.get('nocache')
docker_url = urlparse(module.params.get('docker_url'))
self.client = docker.Client(base_url=docker_url.geturl(), timeout=module.params.get('timeout'))
self.client = docker.Client(
base_url=docker_url.geturl(),
version=module.params.get('docker_api_version'),
timeout=module.params.get('timeout'))
self.changed = False
self.log = []
self.error_msg = None
@ -137,7 +173,7 @@ class DockerImageManager:
return "".join(self.log) if as_string else self.log
def build(self):
stream = self.client.build(self.path, tag=':'.join([self.name, self.tag]), nocache=self.nocache, rm=True, stream=True)
stream = self.client.build(self.path, dockerfile=self.dockerfile, tag=':'.join([self.name, self.tag]), nocache=self.nocache, rm=True, stream=True)
success_search = r'Successfully built ([0-9a-f]+)'
image_id = None
self.changed = True
@ -201,15 +237,23 @@ class DockerImageManager:
def main():
module = AnsibleModule(
argument_spec = dict(
path = dict(required=False, default=None),
name = dict(required=True),
tag = dict(required=False, default="latest"),
nocache = dict(default=False, type='bool'),
state = dict(default='present', choices=['absent', 'present', 'build']),
docker_url = dict(default='unix://var/run/docker.sock'),
timeout = dict(default=600, type='int'),
path = dict(required=False, default=None),
dockerfile = dict(required=False, default="Dockerfile"),
name = dict(required=True),
tag = dict(required=False, default="latest"),
nocache = dict(default=False, type='bool'),
state = dict(default='present', choices=['absent', 'present', 'build']),
docker_url = dict(default='unix://var/run/docker.sock'),
docker_api_version = dict(required=False,
default=DEFAULT_DOCKER_API_VERSION,
type='str'),
timeout = dict(default=600, type='int'),
)
)
if not HAS_DOCKER_CLIENT:
module.fail_json(msg='docker-py is needed for this module')
if not HAS_REQUESTS:
module.fail_json(msg='requests is needed for this module')
try:
manager = DockerImageManager(module)
@ -246,8 +290,8 @@ def main():
except RequestException as e:
module.exit_json(failed=True, changed=manager.has_changed(), msg=repr(e))
# import module snippets
from ansible.module_utils.basic import *
main()
if __name__ == '__main__':
main()

@ -18,34 +18,29 @@ DOCUMENTATION = '''
---
module: gc_storage
version_added: "1.4"
short_description: This module manages objects/buckets in Google Cloud Storage.
short_description: This module manages objects/buckets in Google Cloud Storage.
description:
- This module allows users to manage their objects/buckets in Google Cloud Storage. It allows upload and download operations and can set some canned permissions. It also allows retrieval of URLs for objects for use in playbooks, and retrieval of string contents of objects. This module requires setting the default project in GCS prior to playbook usage. See U(https://developers.google.com/storage/docs/reference/v1/apiversion1) for information about setting the default project.
options:
bucket:
description:
- Bucket name.
- Bucket name.
required: true
default: null
aliases: []
object:
description:
- Keyname of the object inside the bucket. Can be also be used to create "virtual directories" (see examples).
required: false
default: null
aliases: []
src:
description:
- The source file path when performing a PUT operation.
required: false
default: null
aliases: []
dest:
description:
- The destination file path when downloading an object/key with a GET operation.
required: false
aliases: []
force:
description:
- Forces an overwrite either locally on the filesystem or remotely with the object/key. Used with PUT and GET operations.
@ -56,29 +51,27 @@ options:
description:
- This option lets the user set the canned permissions on the object/bucket that are created. The permissions that can be set are 'private', 'public-read', 'authenticated-read'.
required: false
default: private
default: private
headers:
version_added: 2.0
description:
- Headers to attach to object.
required: false
default: {}
default: '{}'
expiration:
description:
- Time limit (in seconds) for the URL generated and returned by GCA when performing a mode=put or mode=get_url operation. This url is only avaialbe when public-read is the acl for the object.
- Time limit (in seconds) for the URL generated and returned by GCS when performing a mode=put or mode=get_url operation. This url is only available when public-read is the acl for the object.
required: false
default: null
aliases: []
mode:
description:
- Switches the module behaviour between upload, download, get_url (return download url) , get_str (download object as string), create (bucket) and delete (bucket).
- Switches the module behaviour between upload, download, get_url (return download url) , get_str (download object as string), create (bucket) and delete (bucket).
required: true
default: null
aliases: []
choices: [ 'get', 'put', 'get_url', 'get_str', 'delete', 'create' ]
gcs_secret_key:
description:
- GCS secret key. If not set then the value of the GCS_SECRET_KEY environment variable is used.
- GCS secret key. If not set then the value of the GCS_SECRET_KEY environment variable is used.
required: true
default: null
gcs_access_key:
@ -87,9 +80,11 @@ options:
required: true
default: null
requirements: [ "boto 2.9+" ]
requirements:
- "python >= 2.6"
- "boto >= 2.9"
author: benno@ansible.com Note. Most of the code has been taken from the S3 module.
author: "Benno Joy (@bennojoy)"
'''
@ -116,16 +111,15 @@ EXAMPLES = '''
- gc_storage: bucket=mybucket mode=delete
'''
import sys
import os
import urlparse
import hashlib
try:
import boto
HAS_BOTO = True
except ImportError:
print "failed=True msg='boto 2.9+ required for this module'"
sys.exit(1)
HAS_BOTO = False
def grant_check(module, gs, obj):
try:
@ -377,6 +371,9 @@ def main():
),
)
if not HAS_BOTO:
module.fail_json(msg='boto 2.9+ required for this module')
bucket = module.params.get('bucket')
obj = module.params.get('object')
src = module.params.get('src')
@ -445,5 +442,5 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
main()
if __name__ == '__main__':
main()

@ -58,6 +58,14 @@ options:
required: false
default: null
aliases: []
service_account_permissions:
version_added: 2.0
description:
- service account permissions (see U(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create), --scopes section for detailed information); see the additional example below
required: false
default: null
aliases: []
choices: ["bigquery", "cloud-platform", "compute-ro", "compute-rw", "computeaccounts-ro", "computeaccounts-rw", "datastore", "logging-write", "monitoring", "sql", "sql-admin", "storage-full", "storage-ro", "storage-rw", "taskqueue", "userinfo-email"]
pem_file:
version_added: 1.5.1
description:
@ -137,10 +145,12 @@ options:
default: "true"
aliases: []
requirements: [ "libcloud" ]
requirements:
- "python >= 2.6"
- "apache-libcloud >= 0.13.3"
notes:
- Either I(name) or I(instance_names) is required.
author: Eric Johnson <erjohnso@google.com>
author: "Eric Johnson (@erjohnso) <erjohnso@google.com>"
'''
EXAMPLES = '''
@ -202,25 +212,21 @@ EXAMPLES = '''
'''
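The service_account_permissions option maps to gcloud's --scopes; a minimal sketch (instance name and scope choices are illustrative):

# Create an instance whose default service account is limited to read-only
# storage access and write access to logging
- gce:
    instance_names: scoped-instance
    zone: us-central1-a
    service_account_permissions:
      - storage-ro
      - logging-write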
import sys
try:
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
ResourceExistsError, ResourceInUseError, ResourceNotFoundError
_ = Provider.GCE
HAS_LIBCLOUD = True
except ImportError:
print("failed=True " + \
"msg='libcloud with GCE support (0.13.3+) required for this module'")
sys.exit(1)
HAS_LIBCLOUD = False
try:
from ast import literal_eval
HAS_PYTHON26 = True
except ImportError:
print("failed=True " + \
"msg='GCE module requires python's 'ast' module, python v2.6+'")
sys.exit(1)
HAS_PYTHON26 = False
def get_instance_info(inst):
@ -289,6 +295,8 @@ def create_instances(module, gce, instance_names):
ip_forward = module.params.get('ip_forward')
external_ip = module.params.get('external_ip')
disk_auto_delete = module.params.get('disk_auto_delete')
service_account_permissions = module.params.get('service_account_permissions')
service_account_email = module.params.get('service_account_email')
if external_ip == "none":
external_ip = None
@ -323,17 +331,29 @@ def create_instances(module, gce, instance_names):
if not isinstance(md, dict):
raise ValueError('metadata must be a dict')
except ValueError, e:
print("failed=True msg='bad metadata: %s'" % str(e))
sys.exit(1)
module.fail_json(msg='bad metadata: %s' % str(e))
except SyntaxError, e:
print("failed=True msg='bad metadata syntax'")
sys.exit(1)
module.fail_json(msg='bad metadata syntax')
items = []
for k,v in md.items():
items.append({"key": k,"value": v})
metadata = {'items': items}
ex_sa_perms = []
bad_perms = []
if service_account_permissions:
for perm in service_account_permissions:
if perm not in gce.SA_SCOPES_MAP.keys():
bad_perms.append(perm)
if len(bad_perms) > 0:
module.fail_json(msg='bad permissions: %s' % str(bad_perms))
if service_account_email:
ex_sa_perms.append({'email': service_account_email})
else:
ex_sa_perms.append({'email': "default"})
ex_sa_perms[0]['scopes'] = service_account_permissions
# These variables all have default values but check just in case
if not lc_image or not lc_network or not lc_machine_type or not lc_zone:
module.fail_json(msg='Missing required create instance variable',
@ -353,7 +373,7 @@ def create_instances(module, gce, instance_names):
inst = gce.create_node(name, lc_machine_type, lc_image,
location=lc_zone, ex_network=network, ex_tags=tags,
ex_metadata=metadata, ex_boot_disk=pd, ex_can_ip_forward=ip_forward,
external_ip=external_ip, ex_disk_auto_delete=disk_auto_delete)
external_ip=external_ip, ex_disk_auto_delete=disk_auto_delete, ex_service_accounts=ex_sa_perms)
changed = True
except ResourceExistsError:
inst = gce.ex_get_node(name, lc_zone)
@ -441,6 +461,7 @@ def main():
tags = dict(type='list'),
zone = dict(default='us-central1-a'),
service_account_email = dict(),
service_account_permissions = dict(type='list'),
pem_file = dict(),
project_id = dict(),
ip_forward = dict(type='bool', default=False),
@ -450,6 +471,11 @@ def main():
)
)
if not HAS_PYTHON26:
module.fail_json(msg="GCE module requires python's 'ast' module, python v2.6+")
if not HAS_LIBCLOUD:
module.fail_json(msg='libcloud with GCE support (0.13.3+) required for this module')
gce = gce_connect(module)
image = module.params.get('image')
@ -503,11 +529,10 @@ def main():
json_output['changed'] = changed
print json.dumps(json_output)
sys.exit(0)
module.exit_json(**json_output)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.gce import *
main()
if __name__ == '__main__':
main()

@ -131,8 +131,10 @@ options:
default: null
aliases: []
requirements: [ "libcloud" ]
author: Eric Johnson <erjohnso@google.com>
requirements:
- "python >= 2.6"
- "apache-libcloud >= 0.13.3"
author: "Eric Johnson (@erjohnso) <erjohnso@google.com>"
'''
EXAMPLES = '''
@ -147,9 +149,6 @@ EXAMPLES = '''
httphealthcheck_path: "/up"
'''
import sys
try:
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
@ -158,10 +157,9 @@ try:
from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
ResourceExistsError, ResourceNotFoundError
_ = Provider.GCE
HAS_LIBCLOUD = True
except ImportError:
print("failed=True " + \
"msg='libcloud with GCE support required for this module.'")
sys.exit(1)
HAS_LIBCLOUD = False
def main():
@ -188,6 +186,9 @@ def main():
)
)
if not HAS_LIBCLOUD:
module.fail_json(msg='libcloud with GCE support (0.13.3+) required for this module.')
gce = gce_connect(module)
httphealthcheck_name = module.params.get('httphealthcheck_name')
@ -325,11 +326,11 @@ def main():
json_output['changed'] = changed
print json.dumps(json_output)
sys.exit(0)
module.exit_json(**json_output)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.gce import *
main()
if __name__ == '__main__':
main()

@ -22,7 +22,7 @@ module: gce_net
version_added: "1.5"
short_description: create/destroy GCE networks and firewall rules
description:
- This module can create and destroy Google Compue Engine networks and
- This module can create and destroy Google Compute Engine networks and
firewall rules U(https://developers.google.com/compute/docs/networking).
The I(name) parameter is reserved for referencing a network while the
I(fwname) parameter is used to reference firewall rules.
@ -102,8 +102,10 @@ options:
default: null
aliases: []
requirements: [ "libcloud" ]
author: Eric Johnson <erjohnso@google.com>
requirements:
- "python >= 2.6"
- "apache-libcloud >= 0.13.3"
author: "Eric Johnson (@erjohnso) <erjohnso@google.com>"
'''
EXAMPLES = '''
@ -123,18 +125,15 @@ EXAMPLES = '''
'''
import sys
try:
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
ResourceExistsError, ResourceNotFoundError
_ = Provider.GCE
HAS_LIBCLOUD = True
except ImportError:
print("failed=True " + \
"msg='libcloud with GCE support required for this module.'")
sys.exit(1)
HAS_LIBCLOUD = False
def format_allowed_section(allowed):
"""Format each section of the allowed list"""
@ -182,6 +181,9 @@ def main():
)
)
if not HAS_LIBCLOUD:
module.fail_json(msg='libcloud with GCE support (0.13.3+) required for this module')
gce = gce_connect(module)
allowed = module.params.get('allowed')
@ -281,11 +283,11 @@ def main():
changed = True
json_output['changed'] = changed
print json.dumps(json_output)
sys.exit(0)
module.exit_json(**json_output)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.gce import *
main()
if __name__ == '__main__':
main()

@ -117,8 +117,10 @@ options:
choices: ["pd-standard", "pd-ssd"]
aliases: []
requirements: [ "libcloud" ]
author: Eric Johnson <erjohnso@google.com>
requirements:
- "python >= 2.6"
- "apache-libcloud >= 0.13.3"
author: "Eric Johnson (@erjohnso) <erjohnso@google.com>"
'''
EXAMPLES = '''
@ -130,18 +132,15 @@ EXAMPLES = '''
name: pd
'''
import sys
try:
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
ResourceExistsError, ResourceNotFoundError, ResourceInUseError
_ = Provider.GCE
HAS_LIBCLOUD = True
except ImportError:
print("failed=True " + \
"msg='libcloud with GCE support is required for this module.'")
sys.exit(1)
HAS_LIBCLOUD = False
def main():
@ -162,6 +161,8 @@ def main():
project_id = dict(),
)
)
if not HAS_LIBCLOUD:
module.fail_json(msg='libcloud with GCE support (0.13.3+) is required for this module')
gce = gce_connect(module)
@ -285,11 +286,11 @@ def main():
changed = True
json_output['changed'] = changed
print json.dumps(json_output)
sys.exit(0)
module.exit_json(**json_output)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.gce import *
main()
if __name__ == '__main__':
main()

@ -88,8 +88,11 @@ options:
description:
- how long before wait gives up, in seconds
default: 300
requirements: [ "linode-python", "pycurl" ]
author: Vincent Viallet
requirements:
- "python >= 2.6"
- "linode-python"
- "pycurl"
author: "Vincent Viallet (@zbal)"
notes:
- LINODE_API_KEY env variable can be used instead
'''
@ -151,22 +154,21 @@ EXAMPLES = '''
state: restarted
'''
import sys
import time
import os
try:
import pycurl
HAS_PYCURL = True
except ImportError:
print("failed=True msg='pycurl required for this module'")
sys.exit(1)
HAS_PYCURL = False
try:
from linode import api as linode_api
HAS_LINODE = True
except ImportError:
print("failed=True msg='linode-python required for this module'")
sys.exit(1)
HAS_LINODE = False
def randompass():
@ -456,6 +458,11 @@ def main():
)
)
if not HAS_PYCURL:
module.fail_json(msg='pycurl required for this module')
if not HAS_LINODE:
module.fail_json(msg='linode-python required for this module')
state = module.params.get('state')
api_key = module.params.get('api_key')
name = module.params.get('name')
@ -490,4 +497,5 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
main()
if __name__ == '__main__':
main()

@ -0,0 +1,56 @@
OpenStack Ansible Modules
=========================
These are a set of modules for interacting with OpenStack as either an admin
or an end user. If the module does not begin with os_, it's either deprecated
or soon to be. This document serves as developer coding guidelines for
modules intended to be here.
Naming
------
* All modules should start with os_
* If the module is one that a cloud consumer would expect to use, it should be
named after the logical resource it manages. Thus, os\_server not os\_nova.
The reasoning for this is that a single logical resource can be managed by
more than one service, and which service manages it is a deployment detail. A
good example of this is floating IPs, which can come from either Nova or
Neutron, but which one they come from is immaterial to an end user.
* If the module is one that a cloud admin would expect to use, it should be
named with the service and the resource, such as os\_keystone\_domain.
* If the module is one that a cloud admin and a cloud consumer could both use,
the cloud consumer rules apply.
Interface
---------
* If the resource being managed has an id, it should be returned.
* If the resource being managed has an associated object more complex than
an id, it should also be returned.
Interoperability
----------------
* It should be assumed that the cloud consumer does not know a bazillion
details about the deployment choices their cloud provider made, and a best
effort should be made to present one sane interface to the ansible user
regardless of deployer insanity.
* All modules should work appropriately against all existing known public
OpenStack clouds.
* It should be assumed that a user may have more than one cloud account that
they wish to combine as part of a single ansible managed infrastructure.
Libraries
---------
* All modules should use openstack\_full\_argument\_spec to pick up the
standard input such as auth and ssl support.
* All modules should use extends\_documentation\_fragment: openstack to go along
with openstack\_full\_argument\_spec.
* All complex cloud interaction or interoperability code should be housed in
the [shade](http://git.openstack.org/cgit/openstack-infra/shade) library.
* All OpenStack API interactions should happen via shade and not via
OpenStack Client libraries. The OpenStack Client libraries do not have end
users as a primary audience; they are for intra-server communication. The
python-openstacksdk is the future there, and shade will migrate to it when
it's ready, in a manner that is not noticeable to ansible users.

@ -20,6 +20,7 @@ DOCUMENTATION = '''
---
module: glance_image
version_added: "1.2"
deprecated: Deprecated in 1.10. Use os_image instead
short_description: Add/Delete images from glance
description:
- Add or Remove images from the glance repository.
@ -111,7 +112,10 @@ options:
required: false
default: publicURL
version_added: "1.7"
requirements: ["glanceclient", "keystoneclient"]
requirements:
- "python >= 2.6"
- "python-glanceclient"
- "python-keystoneclient"
'''
@ -130,9 +134,14 @@ EXAMPLES = '''
import time
try:
import glanceclient
HAS_GLANCECLIENT = True
except ImportError:
HAS_GLANCECLIENT = False
try:
from keystoneclient.v2_0 import client as ksclient
HAS_KEYSTONECLIENT = True
except ImportError:
print("failed=True msg='glanceclient and keystone client are required'")
HAS_KEYSTONECLIENT = False
def _get_ksclient(module, kwargs):
@ -237,6 +246,12 @@ def main():
argument_spec=argument_spec,
mutually_exclusive = [['file','copy_from']],
)
if not HAS_GLANCECLIENT:
module.fail_json(msg='python-glanceclient is required for this module')
if not HAS_KEYSTONECLIENT:
module.fail_json(msg='python-keystoneclient is required for this module')
if module.params['state'] == 'present':
if not module.params['file'] and not module.params['copy_from']:
module.fail_json(msg="Either file or copy_from variable should be set to create the image")
@ -257,4 +272,5 @@ def main():
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
main()
if __name__ == '__main__':
main()

@ -19,20 +19,22 @@
import operator
import os
import time
try:
from novaclient.v1_1 import client as nova_client
from novaclient.v1_1 import floating_ips
from novaclient import exceptions
from novaclient import utils
import time
HAS_NOVACLIENT = True
except ImportError:
print("failed=True msg='novaclient is required for this module'")
HAS_NOVACLIENT = False
DOCUMENTATION = '''
---
module: nova_compute
version_added: "1.2"
deprecated: Deprecated in 2.0. Use os_server instead
short_description: Create/Delete VMs from OpenStack
description:
- Create or Remove virtual machines from Openstack.
@ -174,7 +176,9 @@ options:
required: false
default: None
version_added: "1.9"
requirements: ["novaclient"]
requirements:
- "python >= 2.6"
- "python-novaclient"
'''
EXAMPLES = '''
@ -518,7 +522,7 @@ def _get_server_state(module, nova):
(ip_changed, server) = _check_floating_ips(module, nova, server)
private = openstack_find_nova_addresses(getattr(server, 'addresses'), 'fixed', 'private')
public = openstack_find_nova_addresses(getattr(server, 'addresses'), 'floating', 'public')
module.exit_json(changed = ip_changed, id = server.id, public_ip = ''.join(public), private_ip = ''.join(private), info = server._info)
module.exit_json(changed = ip_changed, id = server.id, public_ip = public, private_ip = private, info = server._info)
if server and module.params['state'] == 'absent':
return True
if module.params['state'] == 'absent':
@ -562,6 +566,9 @@ def main():
],
)
if not HAS_NOVACLIENT:
module.fail_json(msg='python-novaclient is required for this module')
nova = nova_client.Client(module.params['login_username'],
module.params['login_password'],
module.params['login_tenant_name'],
@ -588,4 +595,5 @@ def main():
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
main()
if __name__ == '__main__':
main()

@ -22,13 +22,15 @@ try:
except ImportError:
from quantumclient.quantum import client
from keystoneclient.v2_0 import client as ksclient
HAVE_DEPS = True
except ImportError:
print("failed=True msg='quantumclient (or neutronclient) and keystone client are required'")
HAVE_DEPS = False
DOCUMENTATION = '''
---
module: quantum_network
version_added: "1.4"
deprecated: Deprecated in 2.0. Use os_network instead
short_description: Creates/Removes networks from OpenStack
description:
- Add or Remove network from OpenStack.
@ -103,7 +105,10 @@ options:
- Whether the state should be marked as up or down
required: false
default: true
requirements: ["quantumclient", "neutronclient", "keystoneclient"]
requirements:
- "python >= 2.6"
- "python-neutronclient or python-quantumclient"
- "python-keystoneclient"
'''
@ -244,6 +249,9 @@ def main():
))
module = AnsibleModule(argument_spec=argument_spec)
if not HAVE_DEPS:
module.fail_json(msg='python-keystoneclient and either python-neutronclient or python-quantumclient are required')
if module.params['provider_network_type'] in ['vlan' , 'flat']:
if not module.params['provider_physical_network']:
module.fail_json(msg = " for vlan and flat networks, variable provider_physical_network should be set.")
@ -275,5 +283,6 @@ def main():
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
main()
if __name__ == '__main__':
main()

@ -22,12 +22,14 @@ try:
except ImportError:
from quantumclient.quantum import client
from keystoneclient.v2_0 import client as ksclient
HAVE_DEPS = True
except ImportError:
print("failed=True msg='quantumclient (or neutronclient) and keystoneclient are required'")
HAVE_DEPS = False
DOCUMENTATION = '''
---
module: quantum_subnet
deprecated: Deprecated in 2.0. Use os_subnet instead
version_added: "1.2"
short_description: Add/remove subnet from a network
description:
@ -114,7 +116,10 @@ options:
- From the subnet pool the last IP that should be assigned to the virtual machines
required: false
default: None
requirements: ["quantumclient", "neutronclient", "keystoneclient"]
requirements:
- "python >= 2.6"
- "python-neutronclient or python-quantumclient"
- "python-keystoneclient"
'''
EXAMPLES = '''
@ -267,6 +272,9 @@ def main():
allocation_pool_end = dict(default=None),
))
module = AnsibleModule(argument_spec=argument_spec)
if not HAVE_DEPS:
module.fail_json(msg='python-keystoneclient and either python-neutronclient or python-quantumclient are required')
neutron = _get_neutron_client(module, module.params)
_set_tenant_id(module)
if module.params['state'] == 'present':
@ -287,5 +295,6 @@ def main():
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
main()
if __name__ == '__main__':
main()

@ -72,8 +72,10 @@ options:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
requirements: [ python-keystoneclient ]
author: Lorin Hochstein
requirements:
- "python >= 2.6"
- python-keystoneclient
author: "Lorin Hochstein (@lorin)"
'''
EXAMPLES = '''

@ -17,17 +17,21 @@
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
import time
try:
from novaclient.v1_1 import client as nova_client
from novaclient import exceptions as exc
import time
HAS_NOVACLIENT = True
except ImportError:
print("failed=True msg='novaclient is required for this module to work'")
HAS_NOVACLIENT = False
DOCUMENTATION = '''
---
module: nova_keypair
version_added: "1.2"
author:
- "Benno Joy (@bennojoy)"
- "Michael DeHaan"
short_description: Add/Delete key pair from nova
description:
- Add or Remove key pair from nova .
@ -73,7 +77,9 @@ options:
required: false
default: None
requirements: ["novaclient"]
requirements:
- "python >= 2.6"
- "python-novaclient"
'''
EXAMPLES = '''
# Creates a key pair with the running users public key
@ -94,6 +100,8 @@ def main():
state = dict(default='present', choices=['absent', 'present'])
))
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_NOVACLIENT:
module.fail_json(msg='python-novaclient is required for this module to work')
nova = nova_client.Client(module.params['login_username'],
module.params['login_password'],
@ -135,5 +143,6 @@ def main():
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
main()
if __name__ == '__main__':
main()

@ -27,13 +27,17 @@ DOCUMENTATION = '''
module: os_auth
short_description: Retrieve an auth token
version_added: "2.0"
author: "Monty Taylor (@emonty)"
description:
- Retrieve an auth token from an OpenStack Cloud
- Retrieve an auth token from an OpenStack Cloud
requirements:
- "python >= 2.6"
- "shade"
extends_documentation_fragment: openstack
'''
EXAMPLES = '''
# Authenticate to the cloud and retreive the service catalog
# Authenticate to the cloud and retrieve the service catalog
- os_auth:
cloud: rax-dfw
- debug: var=service_catalog
@ -61,4 +65,5 @@ def main():
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
main()
if __name__ == '__main__':
main()

@ -0,0 +1,57 @@
#!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
import os_client_config
from os_client_config import exceptions
DOCUMENTATION = '''
---
module: os_client_config
short_description: Get OpenStack Client config
description:
- Get I(openstack) client config data from clouds.yaml or environment
version_added: "2.0"
requirements: [ os-client-config ]
author: "Monty Taylor (@emonty)"
'''
EXAMPLES = '''
# Get list of clouds that do not support security groups
- os_client_config:
- debug: var={{ item }}
with_items: "{{ openstack.clouds|rejectattr('secgroup_source', 'none')|list() }}"
'''
def main():
module = AnsibleModule({})
p = module.params
try:
config = os_client_config.OpenStackConfig()
clouds = []
for cloud in config.get_all_clouds():
cloud.config['name'] = cloud.name
clouds.append(cloud.config)
module.exit_json(ansible_facts=dict(openstack=dict(clouds=clouds)))
except exceptions.OpenStackConfigException as e:
module.fail_json(msg=str(e))
# import module snippets
from ansible.module_utils.basic import *
main()

@ -0,0 +1,188 @@
#!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2013, Benno Joy <benno@ansible.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
#TODO(mordred): we need to support "location"(v1) and "locations"(v2)
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
DOCUMENTATION = '''
---
module: os_image
short_description: Add/Delete images from OpenStack Cloud
extends_documentation_fragment: openstack
version_added: "2.0"
author: "Monty Taylor (@emonty)"
description:
- Add or Remove images from the OpenStack Image Repository
options:
name:
description:
- Name that has to be given to the image
required: true
default: None
disk_format:
description:
- The format of the disk that is getting uploaded
required: false
default: qcow2
container_format:
description:
- The format of the container
required: false
default: bare
owner:
description:
- The owner of the image
required: false
default: None
min_disk:
description:
- The minimum disk space required to deploy this image
required: false
default: None
min_ram:
description:
- The minimum ram required to deploy this image
required: false
default: None
is_public:
description:
- Whether the image can be accessed publicly. Note that publicizing an image requires admin role by default.
required: false
default: 'no'
filename:
description:
- The path to the file which has to be uploaded
required: false
default: None
ramdisk:
description:
- The name of an existing ramdisk image that will be associated with this image
required: false
default: None
kernel:
description:
- The name of an existing kernel image that will be associated with this image
required: false
default: None
properties:
description:
- Additional properties to be associated with this image
required: false
default: {}
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
requirements: ["shade"]
'''
EXAMPLES = '''
# Upload an image from a local file named cirros-0.3.0-x86_64-disk.img
- os_image:
auth:
auth_url: http://localhost/auth/v2.0
username: admin
password: passme
project_name: admin
name: cirros
container_format: bare
disk_format: qcow2
state: present
filename: cirros-0.3.0-x86_64-disk.img
kernel: cirros-vmlinuz
ramdisk: cirros-initrd
properties:
cpu_arch: x86_64
distro: ubuntu
'''
def main():
argument_spec = openstack_full_argument_spec(
name = dict(required=True),
disk_format = dict(default='qcow2', choices=['ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw', 'qcow2', 'vdi', 'iso']),
container_format = dict(default='bare', choices=['ami', 'aki', 'ari', 'bare', 'ovf', 'ova']),
owner = dict(default=None),
min_disk = dict(default=None),
min_ram = dict(default=None),
is_public = dict(default=False),
filename = dict(default=None),
ramdisk = dict(default=None),
kernel = dict(default=None),
properties = dict(default={}),
state = dict(default='present', choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec, **module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
try:
cloud = shade.openstack_cloud(**module.params)
changed = False
image = cloud.get_image(name_or_id=module.params['name'])
if module.params['state'] == 'present':
if not image:
image = cloud.create_image(
name=module.params['name'],
filename=module.params['filename'],
disk_format=module.params['disk_format'],
container_format=module.params['container_format'],
wait=module.params['wait'],
timeout=module.params['timeout']
)
changed = True
if not module.params['wait']:
module.exit_json(changed=changed, image=image, id=image.id)
cloud.update_image_properties(
image=image,
kernel=module.params['kernel'],
ramdisk=module.params['ramdisk'],
**module.params['properties'])
image = cloud.get_image(name_or_id=image.id)
module.exit_json(changed=changed, image=image, id=image.id)
elif module.params['state'] == 'absent':
if not image:
changed = False
else:
cloud.delete_image(
name_or_id=module.params['name'],
wait=module.params['wait'],
timeout=module.params['timeout'])
changed = True
module.exit_json(changed=changed)
except shade.OpenStackCloudException as e:
module.fail_json(msg=e.message, extra_data=e.extra_data)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
main()

@ -0,0 +1,353 @@
#!/usr/bin/python
# coding: utf-8 -*-
# (c) 2014, Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
import jsonpatch
DOCUMENTATION = '''
---
module: os_ironic
short_description: Create/Delete Bare Metal Resources from OpenStack
extends_documentation_fragment: openstack
author: "Monty Taylor (@emonty)"
version_added: "2.0"
description:
- Create or Remove Ironic nodes from OpenStack.
options:
state:
description:
- Indicates desired state of the resource
choices: ['present', 'absent']
default: present
uuid:
description:
- globally unique identifier (UUID) to be given to the resource. Will
be auto-generated if not specified, provided a name is specified.
- Definition of a UUID will always take precedence over a name value.
required: false
default: None
name:
description:
- unique name identifier to be given to the resource.
required: false
default: None
driver:
description:
- The name of the Ironic Driver to use with this node.
required: true
default: None
chassis_uuid:
description:
- Associate the node with a pre-defined chassis.
required: false
default: None
ironic_url:
description:
- If noauth mode is utilized, this is required to be set to the
endpoint URL for the Ironic API. Use with "auth" and "auth_type"
settings set to None.
required: false
default: None
driver_info:
description:
- Information for this server's driver. Will vary based on which
driver is in use. Any sub-field which is populated will be validated
during creation.
suboptions:
power:
description:
- Information necessary to turn this server on / off.
This often includes such things as IPMI username, password, and IP address.
required: true
deploy:
description:
- Information necessary to deploy this server directly, without using Nova. THIS IS NOT RECOMMENDED.
console:
description:
- Information necessary to connect to this server's serial console. Not all drivers support this.
management:
description:
- Information necessary to interact with this server's management interface. May be shared by power_info in some cases.
required: true
nics:
description:
- 'A list of network interface cards, eg, " - mac: aa:bb:cc:aa:bb:cc"'
required: true
properties:
description:
- Definition of the physical characteristics of this server, used for scheduling purposes
suboptions:
cpu_arch:
description:
- CPU architecture (x86_64, i686, ...)
default: x86_64
cpus:
description:
- Number of CPU cores this machine has
default: 1
ram:
description:
- amount of RAM this machine has, in MB
default: 1
disk_size:
description:
- size of first storage device in this machine (typically /dev/sda), in GB
default: 1
skip_update_of_masked_password:
description:
- Allows the code that would assert changes to nodes to skip the
update if the change is a single line consisting of the password
field. As of Kilo, by default, passwords are always masked to API
requests, which means the logic as a result always attempts to
re-assert the password field.
required: false
default: false
requirements: ["shade", "jsonpatch"]
'''
EXAMPLES = '''
# Enroll a node with some basic properties and driver info
- os_ironic:
cloud: "devstack"
driver: "pxe_ipmitool"
uuid: "00000000-0000-0000-0000-000000000002"
properties:
cpus: 2
cpu_arch: "x86_64"
ram: 8192
disk_size: 64
nics:
- mac: "aa:bb:cc:aa:bb:cc"
- mac: "dd:ee:ff:dd:ee:ff"
driver_info:
power:
ipmi_address: "1.2.3.4"
ipmi_username: "admin"
ipmi_password: "adminpass"
chassis_uuid: "00000000-0000-0000-0000-000000000001"
'''
def _parse_properties(module):
p = module.params['properties']
props = dict(
cpu_arch=p.get('cpu_arch') if p.get('cpu_arch') else 'x86_64',
cpus=p.get('cpus') if p.get('cpus') else 1,
memory_mb=p.get('ram') if p.get('ram') else 1,
local_gb=p.get('disk_size') if p.get('disk_size') else 1,
)
return props
def _parse_driver_info(module):
p = module.params['driver_info']
info = p.get('power')
if not info:
raise shade.OpenStackCloudException(
"driver_info['power'] is required")
if p.get('console'):
info.update(p.get('console'))
if p.get('management'):
info.update(p.get('management'))
if p.get('deploy'):
info.update(p.get('deploy'))
return info
def _choose_id_value(module):
if module.params['uuid']:
return module.params['uuid']
if module.params['name']:
return module.params['name']
return None
def _is_value_true(value):
true_values = [True, 'yes', 'Yes', 'True', 'true']
if value in true_values:
return True
return False
def _choose_if_password_only(module, patch):
if len(patch) == 1:
if 'password' in patch[0]['path'] and _is_value_true(
module.params['skip_update_of_masked_password']):
# Return False to abort the update, as the password appears
# to be the only element in the patch.
return False
return True
def _exit_node_not_updated(module, server):
module.exit_json(
changed=False,
result="Node not updated",
uuid=server['uuid'],
provision_state=server['provision_state']
)
def main():
argument_spec = openstack_full_argument_spec(
uuid=dict(required=False),
name=dict(required=False),
driver=dict(required=False),
driver_info=dict(type='dict', required=True),
nics=dict(type='list', required=True),
properties=dict(type='dict', default={}),
ironic_url=dict(required=False),
chassis_uuid=dict(required=False),
skip_update_of_masked_password=dict(required=False, choices=BOOLEANS),
state=dict(required=False, default='present')
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec, **module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
if (module.params['auth_type'] in [None, 'None'] and
module.params['ironic_url'] is None):
module.fail_json(msg="Authentication appears to be disabled, "
"Please define an ironic_url parameter")
if (module.params['ironic_url'] and
module.params['auth_type'] in [None, 'None']):
module.params['auth'] = dict(
endpoint=module.params['ironic_url']
)
node_id = _choose_id_value(module)
try:
cloud = shade.operator_cloud(**module.params)
server = cloud.get_machine(node_id)
if module.params['state'] == 'present':
if module.params['driver'] is None:
module.fail_json(msg="A driver must be defined in order "
"to set a node to present.")
properties = _parse_properties(module)
driver_info = _parse_driver_info(module)
kwargs = dict(
driver=module.params['driver'],
properties=properties,
driver_info=driver_info,
name=module.params['name'],
)
if module.params['chassis_uuid']:
kwargs['chassis_uuid'] = module.params['chassis_uuid']
if server is None:
# Note(TheJulia): Add a specific UUID to the request if
# present in order to be able to re-use kwargs for if
# the node already exists logic, since uuid cannot be
# updated.
if module.params['uuid']:
kwargs['uuid'] = module.params['uuid']
server = cloud.register_machine(module.params['nics'],
**kwargs)
module.exit_json(changed=True, uuid=server['uuid'],
provision_state=server['provision_state'])
else:
# TODO(TheJulia): Presently this does not support updating
# nics. Support needs to be added.
#
# Note(TheJulia): This message should never get logged
# however we cannot realistically proceed if neither a
# name or uuid was supplied to begin with.
if not node_id:
module.fail_json(msg="A uuid or name value "
"must be defined")
# Note(TheJulia): Constructing the configuration to compare
# against. The items listed in the server_config block can
# be updated via the API.
server_config = dict(
driver=server['driver'],
properties=server['properties'],
driver_info=server['driver_info'],
name=server['name'],
)
# Add the pre-existing chassis_uuid only if
# it is present in the server configuration.
if hasattr(server, 'chassis_uuid'):
server_config['chassis_uuid'] = server['chassis_uuid']
# Note(TheJulia): If a password is defined and concealed, a
# patch will always be generated and re-asserted.
patch = jsonpatch.JsonPatch.from_diff(server_config, kwargs)
if not patch:
_exit_node_not_updated(module, server)
elif _choose_if_password_only(module, list(patch)):
# Note(TheJulia): Normally we would allow the general
# exception catch below, however this allows a specific
# message.
try:
server = cloud.patch_machine(
server['uuid'],
list(patch))
except Exception as e:
module.fail_json(msg="Failed to update node, "
"Error: %s" % e.message)
# Enumerate out a list of changed paths.
change_list = []
for change in list(patch):
change_list.append(change['path'])
module.exit_json(changed=True,
result="Node Updated",
changes=change_list,
uuid=server['uuid'],
provision_state=server['provision_state'])
# Return not updated by default as the conditions were not met
# to update.
_exit_node_not_updated(module, server)
if module.params['state'] == 'absent':
if not node_id:
module.fail_json(msg="A uuid or name value must be defined "
"in order to remove a node.")
if server is not None:
cloud.unregister_machine(module.params['nics'],
server['uuid'])
module.exit_json(changed=True, result="deleted")
else:
module.exit_json(changed=False, result="Server not found")
except shade.OpenStackCloudException as e:
module.fail_json(msg=e.message)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
main()
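The update path above hinges on jsonpatch.JsonPatch.from_diff: an empty patch means the node already matches the requested configuration. A small sketch of that behavior (the values are illustrative):

import jsonpatch

current = {'driver': 'pxe_ipmitool', 'properties': {'cpus': 1}}
desired = {'driver': 'pxe_ipmitool', 'properties': {'cpus': 2}}

# from_diff() yields RFC 6902 operations; no difference yields an
# empty (falsy) patch, which the module treats as "nothing to update".
patch = jsonpatch.JsonPatch.from_diff(current, desired)
print(list(patch))
# [{'op': 'replace', 'path': '/properties/cpus', 'value': 2}]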

@ -0,0 +1,333 @@
#!/usr/bin/python
# coding: utf-8 -*-
# (c) 2015, Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
DOCUMENTATION = '''
---
module: os_ironic_node
short_description: Activate/Deactivate Bare Metal Resources from OpenStack
author: "Monty Taylor (@emonty)"
extends_documentation_fragment: openstack
version_added: "2.0"
description:
- Deploy to nodes controlled by Ironic.
options:
state:
description:
- Indicates desired state of the resource
choices: ['present', 'absent']
default: present
deploy:
description:
- Indicates if the resource should be deployed. Allows for deployment
logic to be disengaged and control of the node power or maintenance
state to be changed.
choices: ['true', 'false']
default: true
uuid:
description:
- globally unique identifier (UUID) to be given to the resource.
required: false
default: None
ironic_url:
description:
- If noauth mode is utilized, this is required to be set to the
endpoint URL for the Ironic API. Use with "auth" and "auth_type"
settings set to None.
required: false
default: None
config_drive:
description:
- A configdrive file or HTTP(S) URL that will be passed along to the
node.
required: false
default: None
instance_info:
description:
- Definition of the instance information which is used to deploy
the node. This information is only required when an instance is
set to present.
suboptions:
image_source:
description:
- An HTTP(S) URL where the image can be retrieved from.
image_checksum:
description:
- The checksum of image_source.
image_disk_format:
description:
- The type of image that has been requested to be deployed.
power:
description:
- A setting to allow power state to be asserted allowing nodes
that are not yet deployed to be powered on, and nodes that
are deployed to be powered off.
choices: ['present', 'absent']
default: present
maintenance:
description:
- A setting to allow the direct control if a node is in
maintenance mode.
required: false
default: false
maintenance_reason:
description:
- A string expression regarding the reason a node is in a
maintenance mode.
required: false
default: None
'''
EXAMPLES = '''
# Activate a node by booting an image with a configdrive attached
os_ironic_node:
cloud: "openstack"
uuid: "d44666e1-35b3-4f6b-acb0-88ab7052da69"
state: present
power: present
deploy: True
maintenance: False
config_drive: "http://192.168.1.1/host-configdrive.iso"
instance_info:
image_source: "http://192.168.1.1/deploy_image.img"
image_checksum: "356a6b55ecc511a20c33c946c4e678af"
image_disk_format: "qcow"
delegate_to: localhost
'''
def _choose_id_value(module):
if module.params['uuid']:
return module.params['uuid']
if module.params['name']:
return module.params['name']
return None
# TODO(TheJulia): Change this over to use the machine patch method
# in shade once it is available.
def _prepare_instance_info_patch(instance_info):
patch = []
patch.append({
'op': 'replace',
'path': '/instance_info',
'value': instance_info
})
return patch
def _is_true(value):
true_values = [True, 'yes', 'Yes', 'True', 'true', 'present', 'on']
if value in true_values:
return True
return False
def _is_false(value):
false_values = [False, None, 'no', 'No', 'False', 'false', 'absent', 'off']
if value in false_values:
return True
return False
def _check_set_maintenance(module, cloud, node):
if _is_true(module.params['maintenance']):
if _is_false(node['maintenance']):
cloud.set_machine_maintenance_state(
node['uuid'],
True,
reason=module.params['maintenance_reason'])
module.exit_json(changed=True, msg="Node has been set into "
"maintenance mode")
else:
# User has requested maintenance state, node is already in the
# desired state, checking to see if the reason has changed.
if (str(node['maintenance_reason']) !=
str(module.params['maintenance_reason'])):
cloud.set_machine_maintenance_state(
node['uuid'],
True,
reason=module.params['maintenance_reason'])
module.exit_json(changed=True, msg="Node maintenance reason "
"updated, cannot take any "
"additional action.")
elif _is_false(module.params['maintenance']):
if node['maintenance'] is True:
cloud.remove_machine_from_maintenance(node['uuid'])
return True
else:
module.fail_json(msg="maintenance parameter was set but a valid "
"the value was not recognized.")
return False
def _check_set_power_state(module, cloud, node):
if 'power on' in str(node['power_state']):
if _is_false(module.params['power']):
# User has requested the node be powered off.
cloud.set_machine_power_off(node['uuid'])
module.exit_json(changed=True, msg="Power requested off")
if 'power off' in str(node['power_state']):
if (_is_false(module.params['power']) and
_is_false(module.params['state'])):
return False
if (_is_false(module.params['power']) and
_is_true(module.params['state'])):
module.exit_json(
changed=False,
msg="Power for node is %s, node must be reactivated "
"OR set to state absent" % node['power_state']
)
# In the event the power has been toggled on and
# deployment has been requested, we need to skip this
# step.
if (_is_true(module.params['power']) and
_is_false(module.params['deploy'])):
# Node is powered down when it is not awaiting to be provisioned
cloud.set_machine_power_on(node['uuid'])
return True
# Default False if no action has been taken.
return False
def main():
argument_spec = openstack_full_argument_spec(
uuid=dict(required=False),
name=dict(required=False),
instance_info=dict(type='dict', required=False),
config_drive=dict(required=False),
ironic_url=dict(required=False),
state=dict(required=False, default='present'),
maintenance=dict(required=False),
maintenance_reason=dict(required=False),
power=dict(required=False, default='present'),
deploy=dict(required=False, default=True),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec, **module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
if (module.params['auth_type'] in [None, 'None'] and
module.params['ironic_url'] is None):
module.fail_json(msg="Authentication appears disabled, Please "
"define an ironic_url parameter")
if (module.params['ironic_url'] and
module.params['auth_type'] in [None, 'None']):
module.params['auth'] = dict(
endpoint=module.params['ironic_url']
)
node_id = _choose_id_value(module)
if not node_id:
module.fail_json(msg="A uuid or name value must be defined "
"to use this module.")
try:
cloud = shade.operator_cloud(**module.params)
node = cloud.get_machine(node_id)
if node is None:
module.fail_json(msg="node not found")
uuid = node['uuid']
instance_info = module.params['instance_info']
changed = False
# User has requested the desired state to be maintenance.
if module.params['state'] == 'maintenance':
module.params['maintenance'] = True
if node['provision_state'] in [
'cleaning',
'deleting',
'wait call-back']:
module.fail_json(msg="Node is in %s state, cannot act upon the "
"request as the node is in a transition "
"state" % node['provision_state'])
# TODO(TheJulia) This is in-development code, that requires
# code in the shade library that is still in development.
if _check_set_maintenance(module, cloud, node):
if node['provision_state'] in 'active':
module.exit_json(changed=True,
result="Maintenance state changed")
changed = True
node = cloud.get_machine(node_id)
if _check_set_power_state(module, cloud, node):
changed = True
node = cloud.get_machine(node_id)
if _is_true(module.params['state']):
if _is_false(module.params['deploy']):
module.exit_json(
changed=changed,
result="User request has explicitly disabled "
"deployment logic"
)
if 'active' in node['provision_state']:
module.exit_json(
changed=changed,
result="Node already in an active state."
)
if instance_info is None:
module.fail_json(
changed=changed,
msg="When setting an instance to present, "
"instance_info is a required variable.")
# TODO(TheJulia): Update instance info, however info is
# deployment specific. Perhaps consider adding rebuild
# support, although there is a known desire to remove
# rebuild support from Ironic at some point in the future.
patch = _prepare_instance_info_patch(instance_info)
cloud.set_node_instance_info(uuid, patch)
cloud.validate_node(uuid)
cloud.activate_node(uuid, module.params['config_drive'])
# TODO(TheJulia): Add more error checking and a wait option.
# We will need to loop, or just add the logic to shade,
# although this could be a very long running process as
# baremetal deployments are not a "quick" task.
module.exit_json(changed=changed, result="node activated")
elif _is_false(module.params['state']):
if node['provision_state'] not in "deleted":
cloud.purge_node_instance_info(uuid)
cloud.deactivate_node(uuid)
module.exit_json(changed=True, result="deleted")
else:
module.exit_json(changed=False, result="node not found")
else:
module.fail_json(msg="State must be present, absent, "
"maintenance, off")
except shade.OpenStackCloudException as e:
module.fail_json(msg=e.message)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
main()
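A note on the _is_true/_is_false helpers above: they implement a tri-state, not a boolean pair. A value outside both lists is neither true nor false, which is what lets _check_set_maintenance reach its fail_json branch. Isolated for illustration:

def _is_true(value):
    return value in [True, 'yes', 'Yes', 'True', 'true', 'present', 'on']

def _is_false(value):
    return value in [False, None, 'no', 'No', 'False', 'false',
                     'absent', 'off']

assert _is_true('present') and _is_false(None)
# A stray value is neither: the module can then report it as invalid.
assert not _is_true('maybe') and not _is_false('maybe')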

@ -0,0 +1,107 @@
#!/usr/bin/python
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2013, Benno Joy <benno@ansible.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
DOCUMENTATION = '''
---
module: os_network
short_description: Creates/Removes networks from OpenStack
extends_documentation_fragment: openstack
version_added: "2.0"
author: "Monty Taylor (@emonty)"
description:
- Add or Remove network from OpenStack.
options:
name:
description:
- Name to be assigned to the network.
required: true
shared:
description:
- Whether this network is shared or not.
required: false
default: false
admin_state_up:
description:
- Whether the state should be marked as up or down.
required: false
default: true
state:
description:
- Indicate desired state of the resource.
choices: ['present', 'absent']
required: false
default: present
requirements: ["shade"]
'''
EXAMPLES = '''
- os_network:
name=t1network
state=present
'''
def main():
argument_spec = openstack_full_argument_spec(
name=dict(required=True),
shared=dict(default=False, type='bool'),
admin_state_up=dict(default=True, type='bool'),
state=dict(default='present', choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec, **module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
state = module.params['state']
name = module.params['name']
shared = module.params['shared']
admin_state_up = module.params['admin_state_up']
try:
cloud = shade.openstack_cloud(**module.params)
net = cloud.get_network(name)
if state == 'present':
changed = False
if not net:
net = cloud.create_network(name, shared, admin_state_up)
changed = True
module.exit_json(changed=changed, network=net, id=net['id'])
elif state == 'absent':
if not net:
module.exit_json(changed=False)
else:
cloud.delete_network(name)
module.exit_json(changed=True)
except shade.OpenStackCloudException as e:
module.fail_json(msg=e.message)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
main()

@ -0,0 +1,125 @@
#!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2013, Benno Joy <benno@ansible.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
DOCUMENTATION = '''
---
module: os_object
short_description: Create or Delete objects and containers from OpenStack
version_added: "2.0"
author: "Monty Taylor (@emonty)"
extends_documentation_fragment: openstack
description:
- Create or Delete objects and containers from OpenStack
options:
container:
description:
- The name of the container in which to create the object
required: true
name:
description:
- Name to be given to the object. If omitted, operations will be on
the entire container
required: false
filename:
description:
- Path to local file to be uploaded.
required: false
container_access:
description:
- desired container access level.
required: false
choices: ['private', 'public']
default: private
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
'''
EXAMPLES = '''
# Creates an object named 'fstab' in the 'config' container
- os_object: cloud=mordred state=present name=fstab container=config file=/etc/fstab
# Deletes a container called config and all of its contents
- os_object: cloud=rax-iad state=absent container=config
'''
def process_object(
cloud_obj, container, name, filename, container_access, **kwargs):
changed = False
container_obj = cloud_obj.get_container(container)
if kwargs['state'] == 'present':
if not container_obj:
container_obj = cloud_obj.create_container(container)
changed = True
if cloud_obj.get_container_access(container) != container_access:
cloud_obj.set_container_access(container, container_access)
changed = True
if name:
if cloud_obj.is_object_stale(container, name, filename):
cloud_obj.create_object(container, name, filename)
changed = True
else:
if container_obj:
if name:
if cloud_obj.get_object_metadata(container, name):
cloud_obj.delete_object(container, name)
changed = True
else:
cloud_obj.delete_container(container)
changed = True
return changed
def main():
argument_spec = openstack_full_argument_spec(
name=dict(required=False, default=None),
container=dict(required=True),
filename=dict(required=False, default=None),
container_access=dict(default='private', choices=['private', 'public']),
state=dict(default='present', choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec, **module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
try:
cloud = shade.openstack_cloud(**module.params)
changed = process_object(cloud, **module.params)
module.exit_json(changed=changed)
except shade.OpenStackCloudException as e:
module.fail_json(msg=e.message)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
main()
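The idempotency of the upload path rests on cloud_obj.is_object_stale. Shade's actual implementation is not shown here; as a rough sketch of the idea, it presumably compares a local checksum against the stored object's hash, something like the hypothetical helper below (not shade code):

import hashlib

def is_object_stale_sketch(local_path, remote_md5):
    # Hash the local file in chunks and compare it with the checksum
    # recorded on the stored object; a mismatch means re-upload.
    md5 = hashlib.md5()
    with open(local_path, 'rb') as f:
        for chunk in iter(lambda: f.read(65536), b''):
            md5.update(chunk)
    return md5.hexdigest() != remote_md5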

@ -0,0 +1,142 @@
#!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2013, Benno Joy <benno@ansible.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
DOCUMENTATION = '''
---
module: os_security_group
short_description: Add/Delete security groups from an OpenStack cloud.
extends_documentation_fragment: openstack
author: "Monty Taylor (@emonty)"
version_added: "2.0"
description:
- Add or Remove security groups from an OpenStack cloud.
options:
name:
description:
- Name that has to be given to the security group. This module
requires that security group names be unique.
required: true
description:
description:
- Long description of the purpose of the security group
required: false
default: None
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
'''
EXAMPLES = '''
# Create a security group
- os_security_group:
cloud=mordred
state=present
name=foo
description=security group for foo servers
# Update the existing 'foo' security group description
- os_security_group:
cloud=mordred
state=present
name=foo
description=updated description for the foo security group
'''
def _needs_update(module, secgroup):
"""Check for differences in the updatable values.
NOTE: We don't currently allow name updates.
"""
if secgroup['description'] != module.params['description']:
return True
return False
def _system_state_change(module, secgroup):
state = module.params['state']
if state == 'present':
if not secgroup:
return True
return _needs_update(module, secgroup)
if state == 'absent' and secgroup:
return True
return False
def main():
argument_spec = openstack_full_argument_spec(
name=dict(required=True),
description=dict(default=None),
state=dict(default='present', choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec,
supports_check_mode=True,
**module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
name = module.params['name']
state = module.params['state']
description = module.params['description']
try:
cloud = shade.openstack_cloud(**module.params)
secgroup = cloud.get_security_group(name)
if module.check_mode:
module.exit_json(changed=_system_state_change(module, secgroup))
changed = False
if state == 'present':
if not secgroup:
secgroup = cloud.create_security_group(name, description)
changed = True
else:
if _needs_update(module, secgroup):
secgroup = cloud.update_security_group(
secgroup['id'], description=description)
changed = True
module.exit_json(
changed=changed, id=secgroup['id'], secgroup=secgroup)
if state == 'absent':
if secgroup:
cloud.delete_security_group(secgroup['id'])
changed = True
module.exit_json(changed=changed)
except shade.OpenStackCloudException as e:
module.fail_json(msg=e.message)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
main()
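The check-mode handling above follows a pattern worth calling out: predict the change from current state plus desired state, report it, and exit before touching the cloud. Reduced to its core (a generic sketch, not module code):

def predicted_change(state, resource, needs_update):
    # Mirrors _system_state_change: creation, update, and deletion are
    # the only three ways the module can report changed=True.
    if state == 'present':
        return resource is None or needs_update
    return state == 'absent' and resource is not None

assert predicted_change('present', None, False)      # would create
assert predicted_change('present', object(), True)   # would update
assert not predicted_change('absent', None, False)   # nothing to delete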

@ -0,0 +1,451 @@
#!/usr/bin/python
# coding: utf-8 -*-
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2013, Benno Joy <benno@ansible.com>
# Copyright (c) 2013, John Dewey <john@dewey.ws>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
from shade import meta
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
DOCUMENTATION = '''
---
module: os_server
short_description: Create/Delete Compute Instances from OpenStack
extends_documentation_fragment: openstack
version_added: "2.0"
author: "Monty Taylor (@emonty)"
description:
- Create or Remove compute instances from OpenStack.
options:
name:
description:
- Name that has to be given to the instance
required: true
image:
description:
- The name or id of the base image to boot.
required: true
image_exclude:
description:
- Text used to filter image names, for cases (such as HP) where
multiple image names match the common identifying portion.
image_exclude is a negative match filter - it is text that
may not exist in the image name. Defaults to "(deprecated)"
flavor:
description:
- The name or id of the flavor in which the new instance has to be
created. Mutually exclusive with flavor_ram
required: false
default: 1
flavor_ram:
description:
- The minimum amount of RAM, in MB, that the flavor used to create
the new instance must have. Mutually exclusive with flavor.
required: false
default: 1
flavor_include:
description:
- Text used to filter flavor names, for cases (such as Rackspace)
where multiple flavors have the same RAM count.
flavor_include is a positive match filter - it must exist in the
flavor name.
key_name:
description:
- The key pair name to be used when creating an instance
required: false
default: None
security_groups:
description:
- The name of the security group to which the instance should be added
required: false
default: None
nics:
description:
- A list of networks to which the instance's interface should
be attached. Networks may be referenced by net-id or net-name.
required: false
default: None
public_ip:
description:
- Ensure the instance has a public IP address, using whatever mechanism the cloud provides
required: false
default: 'yes'
floating_ips:
description:
- list of valid floating IPs that pre-exist to assign to this node
required: false
default: None
floating_ip_pools:
description:
- list of floating IP pools from which to choose a floating IP
required: false
default: None
meta:
description:
- A list of key/value pairs that should be provided as metadata to
the new instance.
required: false
default: None
wait:
description:
- If the module should wait for the instance to be created.
required: false
default: 'yes'
timeout:
description:
- The amount of time the module should wait for the instance to get
into active state.
required: false
default: 180
config_drive:
description:
- Whether to boot the server with config drive enabled
required: false
default: 'no'
userdata:
description:
- Opaque blob of data which is made available to the instance
required: false
default: None
root_volume:
description:
- Boot instance from a volume
required: false
default: None
terminate_volume:
description:
- If true, delete volume when deleting instance (if booted from volume)
default: false
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
requirements:
- "python >= 2.6"
- "shade"
'''
EXAMPLES = '''
# Creates a new instance and attaches to a network and passes metadata to
# the instance
- os_server:
state: present
auth:
auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
username: admin
password: admin
project_name: admin
name: vm1
image: 4f905f38-e52a-43d2-b6ec-754a13ffb529
key_name: ansible_key
timeout: 200
flavor: 4
nics:
- net-id: 34605f38-e52a-25d2-b6ec-754a13ffb723
- net-name: another_network
meta:
hostname: test1
group: uge_master
# Creates a new instance in HP Cloud AE1 region availability zone az2 and
# automatically assigns a floating IP
- name: launch a compute instance
hosts: localhost
tasks:
- name: launch an instance
os_server:
state: present
auth:
auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
username: username
password: Equality7-2521
project_name: username-project1
name: vm1
region_name: region-b.geo-1
availability_zone: az2
image: 9302692b-b787-4b52-a3a6-daebb79cb498
key_name: test
timeout: 200
flavor: 101
security_groups: default
auto_floating_ip: yes
# Creates a new instance in named cloud mordred availability zone az2
# and assigns a pre-known floating IP
- name: launch a compute instance
hosts: localhost
tasks:
- name: launch an instance
os_server:
state: present
cloud: mordred
name: vm1
availability_zone: az2
image: 9302692b-b787-4b52-a3a6-daebb79cb498
key_name: test
timeout: 200
flavor: 101
floating_ips:
- 12.34.56.79
# Creates a new instance with 4G of RAM on Ubuntu Trusty, ignoring
# deprecated images
- name: launch a compute instance
hosts: localhost
tasks:
- name: launch an instance
os_server:
name: vm1
state: present
cloud: mordred
region_name: region-b.geo-1
image: Ubuntu Server 14.04
image_exclude: deprecated
flavor_ram: 4096
# Creates a new instance with 4G of RAM on Ubuntu Trusty on a Performance node
- name: launch a compute instance
hosts: localhost
tasks:
- name: launch an instance
os_server:
name: vm1
cloud: rax-dfw
state: present
image: Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM)
flavor_ram: 4096
flavor_include: Performance
'''
def _exit_hostvars(module, cloud, server, changed=True):
hostvars = meta.get_hostvars_from_server(cloud, server)
module.exit_json(
changed=changed, server=server, id=server.id, openstack=hostvars)
def _network_args(module, cloud):
args = []
for net in module.params['nics']:
if net.get('net-id'):
args.append(net)
elif net.get('net-name'):
by_name = cloud.get_network(net['net-name'])
if not by_name:
module.fail_json(
msg='Could not find network by net-name: %s' %
net['net-name'])
args.append({'net-id': by_name['id']})
return args
def _delete_server(module, cloud):
try:
cloud.delete_server(
module.params['name'], wait=module.params['wait'],
timeout=module.params['timeout'])
except Exception as e:
module.fail_json(msg="Error in deleting vm: %s" % e.message)
module.exit_json(changed=True, result='deleted')
def _create_server(module, cloud):
flavor = module.params['flavor']
flavor_ram = module.params['flavor_ram']
flavor_include = module.params['flavor_include']
image_id = None
if not module.params['root_volume']:
image_id = cloud.get_image_id(
module.params['image'], module.params['image_exclude'])
if flavor:
flavor_dict = cloud.get_flavor(flavor)
else:
flavor_dict = cloud.get_flavor_by_ram(flavor_ram, flavor_include)
nics = _network_args(module, cloud)
bootkwargs = dict(
name=module.params['name'],
image=image_id,
flavor=flavor_dict['id'],
nics=nics,
meta=module.params['meta'],
security_groups=module.params['security_groups'].split(','),
userdata=module.params['userdata'],
config_drive=module.params['config_drive'],
)
for optional_param in ('region_name', 'key_name', 'availability_zone'):
if module.params[optional_param]:
bootkwargs[optional_param] = module.params[optional_param]
server = cloud.create_server(
ip_pool=module.params['floating_ip_pools'],
ips=module.params['floating_ips'],
auto_ip=module.params['auto_floating_ip'],
root_volume=module.params['root_volume'],
terminate_volume=module.params['terminate_volume'],
wait=module.params['wait'], timeout=module.params['timeout'],
**bootkwargs
)
_exit_hostvars(module, cloud, server)
def _delete_floating_ip_list(cloud, server, extra_ips):
for ip in extra_ips:
cloud.nova_client.servers.remove_floating_ip(
server=server.id, address=ip)
def _check_floating_ips(module, cloud, server):
changed = False
auto_floating_ip = module.params['auto_floating_ip']
floating_ips = module.params['floating_ips']
floating_ip_pools = module.params['floating_ip_pools']
if floating_ip_pools or floating_ips or auto_floating_ip:
ips = openstack_find_nova_addresses(server.addresses, 'floating')
if not ips:
# If we're configured to have a floating but we don't have one,
# let's add one
server = cloud.add_ips_to_server(
server,
auto_ip=auto_floating_ip,
ips=floating_ips,
ip_pool=floating_ip_pools,
)
changed = True
elif floating_ips:
# we were configured to have specific ips, let's make sure we have
# those
missing_ips = []
for ip in floating_ips:
if ip not in ips:
missing_ips.append(ip)
if missing_ips:
server = cloud.add_ip_list(server, missing_ips)
changed = True
extra_ips = []
for ip in ips:
if ip not in floating_ips:
extra_ips.append(ip)
if extra_ips:
_delete_floating_ip_list(cloud, server, extra_ips)
changed = True
return (changed, server)
def _get_server_state(module, cloud):
state = module.params['state']
server = cloud.get_server(module.params['name'])
if server and state == 'present':
if server.status != 'ACTIVE':
module.fail_json(
msg="The instance is available but not in ACTIVE state: "
+ server.status)
(ip_changed, server) = _check_floating_ips(module, cloud, server)
_exit_hostvars(module, cloud, server, ip_changed)
if server and state == 'absent':
return True
if state == 'absent':
module.exit_json(changed=False, result="not present")
return True
def main():
argument_spec = openstack_full_argument_spec(
name = dict(required=True),
image = dict(default=None),
image_exclude = dict(default='(deprecated)'),
flavor = dict(default=None),
flavor_ram = dict(default=None, type='int'),
flavor_include = dict(default=None),
key_name = dict(default=None),
security_groups = dict(default='default'),
nics = dict(default=[]),
meta = dict(default=None),
userdata = dict(default=None),
config_drive = dict(default=False, type='bool'),
auto_floating_ip = dict(default=True, type='bool'),
floating_ips = dict(default=None),
floating_ip_pools = dict(default=None),
root_volume = dict(default=None),
terminate_volume = dict(default=False, type='bool'),
state = dict(default='present', choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs(
mutually_exclusive=[
['auto_floating_ip', 'floating_ips'],
['auto_floating_ip', 'floating_ip_pools'],
['floating_ips', 'floating_ip_pools'],
['flavor', 'flavor_ram'],
['image', 'root_volume'],
],
)
module = AnsibleModule(argument_spec, **module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
state = module.params['state']
image = module.params['image']
root_volume = module.params['root_volume']
flavor = module.params['flavor']
flavor_ram = module.params['flavor_ram']
if state == 'present':
if not (image or root_volume):
module.fail_json(
msg="Parameter 'image' or 'root_volume' is required "
"if state == 'present'"
)
if not flavor and not flavor_ram:
module.fail_json(
msg="Parameter 'flavor' or 'flavor_ram' is required "
"if state == 'present'"
)
try:
cloud_params = dict(module.params)
cloud_params.pop('userdata', None)
cloud = shade.openstack_cloud(**cloud_params)
if state == 'present':
_get_server_state(module, cloud)
_create_server(module, cloud)
elif state == 'absent':
_get_server_state(module, cloud)
_delete_server(module, cloud)
except shade.OpenStackCloudException as e:
module.fail_json(msg=e.message, extra_data=e.extra_data)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
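_check_floating_ips above is a reconciliation loop: attach the requested IPs that are missing, detach the attached ones that were not requested. The same logic expressed as set arithmetic (addresses are illustrative):

current = {'10.0.0.5', '10.0.0.9'}       # IPs already on the server
desired = {'10.0.0.5', '10.0.0.7'}       # floating_ips from the task

missing_ips = desired - current          # would go to add_ip_list()
extra_ips = current - desired            # would be removed one by one
print(sorted(missing_ips), sorted(extra_ips))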

@ -0,0 +1,193 @@
#!/usr/bin/python
# coding: utf-8 -*-
# Copyright (c) 2015, Jesse Keating <jlk@derpops.bike>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
from shade import meta
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
DOCUMENTATION = '''
---
module: os_server_actions
short_description: Perform actions on Compute Instances from OpenStack
extends_documentation_fragment: openstack
version_added: "2.0"
author: "Jesse Keating (@j2sol)"
description:
- Perform server actions on an existing compute instance from OpenStack.
This module does not return any data other than changed true/false.
options:
server:
description:
- Name or ID of the instance
required: true
wait:
description:
- If the module should wait for the instance action to be performed.
required: false
default: 'yes'
timeout:
description:
- The amount of time the module should wait for the instance to perform
the requested action.
required: false
default: 180
action:
description:
- Perform the given action. The lock and unlock actions always return
changed as the servers API does not provide lock status.
choices: [pause, unpause, lock, unlock, suspend, resume]
required: true
requirements:
- "python >= 2.6"
- "shade"
'''
EXAMPLES = '''
# Pauses a compute instance
- os_server_actions:
action: pause
auth:
auth_url: https://mycloud.openstack.blueboxgrid.com:5001/v2.0
username: admin
password: admin
project_name: admin
server: vm1
timeout: 200
'''
_action_map = {'pause': 'PAUSED',
'unpause': 'ACTIVE',
'lock': 'ACTIVE', # API doesn't show lock/unlock status
'unlock': 'ACTIVE',
'suspend': 'SUSPENDED',
'resume': 'ACTIVE',}
_admin_actions = ['pause', 'unpause', 'suspend', 'resume', 'lock', 'unlock']
def _wait(timeout, cloud, server, action, module):
"""Wait for the server to reach the desired state for the given action."""
for count in shade._iterate_timeout(
timeout,
"Timeout waiting for server to complete %s" % action):
try:
server = cloud.get_server(server.id)
except Exception:
continue
if server.status == _action_map[action]:
return
if server.status == 'ERROR':
module.fail_json(msg="Server reached ERROR state while attempting to %s" % action)
def _system_state_change(action, status):
"""Check if system state would change."""
if status == _action_map[action]:
return False
return True
def main():
argument_spec = openstack_full_argument_spec(
server=dict(required=True),
action=dict(required=True, choices=['pause', 'unpause', 'lock', 'unlock', 'suspend',
'resume']),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec, supports_check_mode=True, **module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
action = module.params['action']
wait = module.params['wait']
timeout = module.params['timeout']
try:
if action in _admin_actions:
cloud = shade.operator_cloud(**module.params)
else:
cloud = shade.openstack_cloud(**module.params)
server = cloud.get_server(module.params['server'])
if not server:
module.fail_json(msg='Could not find server %s' % module.params['server'])
status = server.status
if module.check_mode:
module.exit_json(changed=_system_state_change(action, status))
if action == 'pause':
if not _system_state_change(action, status):
module.exit_json(changed=False)
cloud.nova_client.servers.pause(server=server.id)
if wait:
_wait(timeout, cloud, server, action, module)
module.exit_json(changed=True)
elif action == 'unpause':
if not _system_state_change(action, status):
module.exit_json(changed=False)
cloud.nova_client.servers.unpause(server=server.id)
if wait:
_wait(timeout, cloud, server, action, module)
module.exit_json(changed=True)
elif action == 'lock':
# lock doesn't set a state, just do it
cloud.nova_client.servers.lock(server=server.id)
module.exit_json(changed=True)
elif action == 'unlock':
# unlock doesn't set a state, just do it
cloud.nova_client.servers.unlock(server=server.id)
module.exit_json(changed=True)
elif action == 'suspend':
if not _system_state_change(action, status):
module.exit_json(changed=False)
cloud.nova_client.servers.suspend(server=server.id)
if wait:
_wait(timeout, cloud, server, action, module)
module.exit_json(changed=True)
elif action == 'resume':
if not _system_state_change(action, status):
module.exit_json(changed=False)
cloud.nova_client.servers.resume(server=server.id)
if wait:
_wait(timeout, cloud, server, action, module)
module.exit_json(changed=True)
except shade.OpenStackCloudException as e:
module.fail_json(msg=e.message, extra_data=e.extra_data)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
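_wait above leans on shade's private _iterate_timeout helper. Its observable behavior is roughly the generator below (a behavioral sketch under that assumption, not shade's actual code):

import time

def iterate_timeout_sketch(timeout, message, interval=2):
    # Yield an attempt counter until the deadline passes, then raise;
    # the caller returns early once the server reaches the target state.
    deadline = time.time() + timeout
    count = 0
    while time.time() < deadline:
        count += 1
        yield count
        time.sleep(interval)
    raise RuntimeError(message)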

@ -0,0 +1,80 @@
#!/usr/bin/python
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
from shade import meta
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
DOCUMENTATION = '''
---
module: os_server_facts
short_description: Retrieve facts about a compute instance
version_added: "2.0"
author: "Monty Taylor (@emonty)"
description:
- Retrieve facts about a server instance from OpenStack.
notes:
- Facts are placed in the C(openstack) variable.
requirements:
- "python >= 2.6"
- "shade"
options:
server:
description:
- Name or ID of the instance
required: true
extends_documentation_fragment: openstack
'''
EXAMPLES = '''
# Gather facts about a previously created server named vm1
- os_server_facts:
cloud: rax-dfw
server: vm1
- debug: var=openstack
'''
def main():
argument_spec = openstack_full_argument_spec(
server=dict(required=True),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec, **module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
try:
cloud = shade.openstack_cloud(**module.params)
server = cloud.get_server(module.params['server'])
hostvars = dict(openstack=meta.get_hostvars_from_server(
cloud, server))
module.exit_json(changed=False, ansible_facts=hostvars)
except shade.OpenStackCloudException as e:
module.fail_json(msg=e.message)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()

@ -0,0 +1,155 @@
#!/usr/bin/python
#coding: utf-8 -*-
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
from shade import meta
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
DOCUMENTATION = '''
---
module: os_server_volume
short_description: Attach/Detach Volumes from OpenStack VMs
extends_documentation_fragment: openstack
version_added: "2.0"
author: "Monty Taylor (@emonty)"
description:
- Attach or detach volumes from OpenStack VMs
options:
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
required: false
server:
description:
- Name or ID of server you want to attach a volume to
required: true
volume:
description:
- Name or id of volume you want to attach to a server
required: true
device:
description:
- Device you want to attach. Defaults to auto finding a device name.
required: false
default: None
requirements:
- "python >= 2.6"
- "shade"
'''
EXAMPLES = '''
# Attaches a volume to a compute host
- name: attach a volume
hosts: localhost
tasks:
- name: attach volume to host
os_server_volume:
state: present
cloud: mordred
server: Mysql-server
volume: mysql-data
device: /dev/vdb
'''
def _system_state_change(state, device):
"""Check if system state would change."""
if state == 'present':
if device:
return False
return True
if state == 'absent':
if device:
return True
return False
return False
def main():
argument_spec = openstack_full_argument_spec(
server=dict(required=True),
volume=dict(required=True),
device=dict(default=None), # None == auto choose device name
state=dict(default='present', choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec,
supports_check_mode=True,
**module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
state = module.params['state']
wait = module.params['wait']
timeout = module.params['timeout']
try:
cloud = shade.openstack_cloud(**module.params)
server = cloud.get_server(module.params['server'])
volume = cloud.get_volume(module.params['volume'])
dev = cloud.get_volume_attach_device(volume, server.id)
if module.check_mode:
module.exit_json(changed=_system_state_change(state, dev))
if state == 'present':
if dev:
# Volume is already attached to this server
module.exit_json(changed=False)
cloud.attach_volume(server, volume, module.params['device'],
wait=wait, timeout=timeout)
server = cloud.get_server(module.params['server']) # refresh
volume = cloud.get_volume(module.params['volume']) # refresh
hostvars = meta.get_hostvars_from_server(cloud, server)
module.exit_json(
changed=True,
id=volume['id'],
attachments=volume['attachments'],
openstack=hostvars
)
elif state == 'absent':
if not dev:
# Volume is not attached to this server
module.exit_json(changed=False)
cloud.detach_volume(server, volume, wait=wait, timeout=timeout)
module.exit_json(
changed=True,
result='Detached volume from server'
)
except (shade.OpenStackCloudException, shade.OpenStackCloudTimeout) as e:
module.fail_json(msg=e.message)
# this is magic, see lib/ansible/module_utils/common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()

@ -0,0 +1,261 @@
#!/usr/bin/python
#coding: utf-8 -*-
# (c) 2013, Benno Joy <benno@ansible.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
DOCUMENTATION = '''
---
module: os_subnet
short_description: Add/Remove subnet to an OpenStack network
extends_documentation_fragment: openstack
version_added: "2.0"
author: "Monty Taylor (@emonty)"
description:
- Add or remove a subnet to/from an OpenStack network
options:
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
required: false
default: present
network_name:
description:
- Name of the network to which the subnet should be attached
required: true when state is 'present'
name:
description:
- The name of the subnet that should be created. Although Neutron
allows for non-unique subnet names, this module enforces subnet
name uniqueness.
required: true
cidr:
description:
- The CIDR representation of the subnet that should be created.
required: true when state is 'present'
default: None
ip_version:
description:
- The IP version of the subnet: 4 or 6.
required: false
default: 4
enable_dhcp:
description:
- Whether DHCP should be enabled for this subnet.
required: false
default: true
gateway_ip:
description:
- The IP address that would be assigned to the gateway for this subnet
required: false
default: None
dns_nameservers:
description:
- List of DNS nameservers for this subnet.
required: false
default: None
allocation_pool_start:
description:
- The first IP address of the allocation pool from which instance
addresses are assigned.
required: false
default: None
allocation_pool_end:
description:
- The last IP address of the allocation pool from which instance
addresses are assigned.
required: false
default: None
host_routes:
description:
- A list of host route dictionaries for the subnet.
required: false
default: None
requirements:
- "python >= 2.6"
- "shade"
'''
EXAMPLES = '''
# Create a new (or update an existing) subnet on the specified network
- os_subnet:
state=present
network_name=network1
name=net1subnet
cidr=192.168.0.0/24
dns_nameservers:
- 8.8.8.7
- 8.8.8.8
host_routes:
- destination: 0.0.0.0/0
nexthop: 192.168.0.254
- destination: 192.168.0.0/24
nexthop: 192.168.0.1
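# Create a subnet with an explicit allocation pool (a sketch; names and
# addresses are illustrative; the module requires both start and end)
- os_subnet:
    state=present
    network_name=network1
    name=net2subnet
    cidr=192.168.1.0/24
    allocation_pool_start=192.168.1.10
    allocation_pool_end=192.168.1.100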
# Delete a subnet
- os_subnet:
state=absent
name=net1subnet
'''
def _needs_update(subnet, module):
"""Check for differences in the updatable values."""
enable_dhcp = module.params['enable_dhcp']
subnet_name = module.params['name']
pool_start = module.params['allocation_pool_start']
pool_end = module.params['allocation_pool_end']
gateway_ip = module.params['gateway_ip']
dns = module.params['dns_nameservers']
host_routes = module.params['host_routes']
curr_pool = subnet['allocation_pools'][0]
if subnet['enable_dhcp'] != enable_dhcp:
return True
if subnet_name and subnet['name'] != subnet_name:
return True
if pool_start and curr_pool['start'] != pool_start:
return True
if pool_end and curr_pool['end'] != pool_end:
return True
if gateway_ip and subnet['gateway_ip'] != gateway_ip:
return True
if dns and sorted(subnet['dns_nameservers']) != sorted(dns):
return True
if host_routes:
curr_hr = sorted(subnet['host_routes'], key=lambda t: t.keys())
new_hr = sorted(host_routes, key=lambda t: t.keys())
if sorted(curr_hr) != sorted(new_hr):
return True
return False
def _system_state_change(module, subnet):
state = module.params['state']
if state == 'present':
if not subnet:
return True
return _needs_update(subnet, module)
if state == 'absent' and subnet:
return True
return False
def main():
argument_spec = openstack_full_argument_spec(
name=dict(required=True),
network_name=dict(default=None),
cidr=dict(default=None),
ip_version=dict(default='4', choices=['4', '6']),
enable_dhcp=dict(default='true', type='bool'),
gateway_ip=dict(default=None),
dns_nameservers=dict(default=None, type='list'),
allocation_pool_start=dict(default=None),
allocation_pool_end=dict(default=None),
host_routes=dict(default=None, type='list'),
state=dict(default='present', choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec,
supports_check_mode=True,
**module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
state = module.params['state']
network_name = module.params['network_name']
cidr = module.params['cidr']
ip_version = module.params['ip_version']
enable_dhcp = module.params['enable_dhcp']
subnet_name = module.params['name']
gateway_ip = module.params['gateway_ip']
dns = module.params['dns_nameservers']
pool_start = module.params['allocation_pool_start']
pool_end = module.params['allocation_pool_end']
host_routes = module.params['host_routes']
# Check for required parameters when state == 'present'
if state == 'present':
for p in ['network_name', 'cidr']:
if not module.params[p]:
module.fail_json(msg='%s required with present state' % p)
if pool_start and pool_end:
pool = [dict(start=pool_start, end=pool_end)]
elif pool_start or pool_end:
module.fail_json(msg='allocation pool requires start and end values')
else:
pool = None
try:
cloud = shade.openstack_cloud(**module.params)
subnet = cloud.get_subnet(subnet_name)
if module.check_mode:
module.exit_json(changed=_system_state_change(module, subnet))
if state == 'present':
if not subnet:
subnet = cloud.create_subnet(network_name, cidr,
ip_version=ip_version,
enable_dhcp=enable_dhcp,
subnet_name=subnet_name,
gateway_ip=gateway_ip,
dns_nameservers=dns,
allocation_pools=pool,
host_routes=host_routes)
changed = True
else:
if _needs_update(subnet, module):
cloud.update_subnet(subnet['id'],
subnet_name=subnet_name,
enable_dhcp=enable_dhcp,
gateway_ip=gateway_ip,
dns_nameservers=dns,
allocation_pools=pool,
host_routes=host_routes)
changed = True
else:
changed = False
module.exit_json(changed=changed)
elif state == 'absent':
if not subnet:
changed = False
else:
changed = True
cloud.delete_subnet(subnet_name)
module.exit_json(changed=changed)
except shade.OpenStackCloudException as e:
module.fail_json(msg=e.message)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()

@ -0,0 +1,162 @@
#!/usr/bin/python
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
DOCUMENTATION = '''
---
module: os_volume
short_description: Create/Delete Cinder Volumes
extends_documentation_fragment: openstack
version_added: "2.0"
author: "Monty Taylor (@emonty)"
description:
- Create or remove Cinder block storage volumes
options:
size:
description:
- Size of volume in GB
required: only when state is 'present'
default: None
display_name:
description:
- Name of volume
required: true
display_description:
description:
- String describing the volume
required: false
default: None
volume_type:
description:
- Volume type for volume
required: false
default: None
image:
description:
- Image name or ID for boot-from-volume
required: false
default: None
snapshot_id:
description:
- ID of the volume snapshot to create the volume from
required: false
default: None
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
requirements:
- "python >= 2.6"
- "shade"
'''
EXAMPLES = '''
# Creates a new volume
- name: create a volume
hosts: localhost
tasks:
- name: create 40g test volume
os_volume:
state: present
cloud: mordred
availability_zone: az2
size: 40
display_name: test_volume
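# Removes the volume again (a minimal sketch, reusing the names above)
- name: delete a volume
  hosts: localhost
  tasks:
    - name: delete 40g test volume
      os_volume:
        state: absent
        cloud: mordred
        display_name: test_volume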
'''
def _present_volume(module, cloud):
if cloud.volume_exists(module.params['display_name']):
v = cloud.get_volume(module.params['display_name'])
module.exit_json(changed=False, id=v['id'], volume=v)
volume_args = dict(
size=module.params['size'],
volume_type=module.params['volume_type'],
display_name=module.params['display_name'],
display_description=module.params['display_description'],
snapshot_id=module.params['snapshot_id'],
availability_zone=module.params['availability_zone'],
)
if module.params['image']:
image_id = cloud.get_image_id(module.params['image'])
volume_args['imageRef'] = image_id
volume = cloud.create_volume(
wait=module.params['wait'], timeout=module.params['timeout'],
**volume_args)
module.exit_json(changed=True, id=volume['id'], volume=volume)
def _absent_volume(module, cloud):
try:
cloud.delete_volume(
name_or_id=module.params['display_name'],
wait=module.params['wait'],
timeout=module.params['timeout'])
except shade.OpenStackCloudTimeout:
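# A timeout while waiting for the delete is reported as "no change"
# rather than a failure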
module.exit_json(changed=False)
module.exit_json(changed=True)
def main():
argument_spec = openstack_full_argument_spec(
size=dict(default=None),
volume_type=dict(default=None),
display_name=dict(required=True, aliases=['name']),
display_description=dict(default=None, aliases=['description']),
image=dict(default=None),
snapshot_id=dict(default=None),
state=dict(default='present', choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs(
mutually_exclusive=[
['image', 'snapshot_id'],
],
)
module = AnsibleModule(argument_spec=argument_spec, **module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
state = module.params['state']
if state == 'present' and not module.params['size']:
module.fail_json(msg="Size is required when state is 'present'")
try:
cloud = shade.openstack_cloud(**module.params)
if state == 'present':
_present_volume(module, cloud)
if state == 'absent':
_absent_volume(module, cloud)
except shade.OpenStackCloudException as e:
module.fail_json(msg=e.message)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()

@ -16,6 +16,8 @@
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
import time
try:
from novaclient.v1_1 import client as nova_client
try:
@ -23,14 +25,17 @@ try:
except ImportError:
from quantumclient.quantum import client
from keystoneclient.v2_0 import client as ksclient
import time
HAVE_DEPS = True
except ImportError:
print("failed=True msg='novaclient,keystoneclient and quantumclient (or neutronclient) are required'")
HAVE_DEPS = False
DOCUMENTATION = '''
---
module: quantum_floating_ip
version_added: "1.2"
author:
- "Benno Joy (@bennojoy)"
- "Brad P. Crochet (@bcrochet)"
short_description: Add/Remove floating IP from an instance
description:
- Add or Remove a floating IP to an instance
@ -81,7 +86,11 @@ options:
required: false
default: None
version_added: "1.5"
requirements: ["novaclient", "quantumclient", "neutronclient", "keystoneclient"]
requirements:
- "python >= 2.6"
- "python-novaclient"
- "python-neutronclient or python-quantumclient"
- "python-keystoneclient"
'''
EXAMPLES = '''
@ -96,7 +105,8 @@ def _get_ksclient(module, kwargs):
kclient = ksclient.Client(username=kwargs.get('login_username'),
password=kwargs.get('login_password'),
tenant_name=kwargs.get('login_tenant_name'),
auth_url=kwargs.get('auth_url'))
auth_url=kwargs.get('auth_url'),
region_name=kwargs.get('region_name'))
except Exception, e:
module.fail_json(msg = "Error authenticating to the keystone: %s " % e.message)
global _os_keystone
@ -171,7 +181,7 @@ def _get_port_info(neutron, module, instance_id, internal_network_name=None):
return None, None
return fixed_ip_address, port_id
def _get_floating_ip(module, neutron, fixed_ip_address):
def _get_floating_ip(module, neutron, fixed_ip_address, network_name):
kwargs = {
'fixed_ip_address': fixed_ip_address
}
@ -181,7 +191,16 @@ def _get_floating_ip(module, neutron, fixed_ip_address):
module.fail_json(msg = "error in fetching the floatingips's %s" % e.message)
if not ips['floatingips']:
return None, None
return ips['floatingips'][0]['id'], ips['floatingips'][0]['floating_ip_address']
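# Only return a floating IP that belongs to the requested external network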
for address in ips['floatingips']:
if _check_ips_network(neutron, address['floating_network_id'], network_name):
return address['id'], address['floating_ip_address']
return None, None
def _check_ips_network(neutron, net_id, network_name):
    return neutron.show_network(net_id)['network']['name'] == network_name
def _create_floating_ip(neutron, module, port_id, net_id, fixed_ip):
kwargs = {
@ -229,9 +248,12 @@ def main():
))
module = AnsibleModule(argument_spec=argument_spec)
if not HAVE_DEPS:
module.fail_json(msg='python-novaclient, python-keystoneclient, and either python-neutronclient or python-quantumclient are required')
try:
nova = nova_client.Client(module.params['login_username'], module.params['login_password'],
module.params['login_tenant_name'], module.params['auth_url'], service_type='compute')
module.params['login_tenant_name'], module.params['auth_url'], region_name=module.params['region_name'], service_type='compute')
neutron = _get_neutron_client(module, module.params)
except Exception, e:
module.fail_json(msg="Error in authenticating to nova: %s" % e.message)
@ -244,7 +266,7 @@ def main():
if not port_id:
module.fail_json(msg="Cannot find a port for this instance, maybe fixed ip is not assigned")
floating_id, floating_ip = _get_floating_ip(module, neutron, fixed_ip)
floating_id, floating_ip = _get_floating_ip(module, neutron, fixed_ip, module.params['network_name'])
if module.params['state'] == 'present':
if floating_ip:
@ -262,5 +284,6 @@ def main():
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
main()
if __name__ == '__main__':
main()

@ -16,6 +16,7 @@
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
import time
try:
from novaclient.v1_1 import client as nova_client
try:
@ -23,14 +24,15 @@ try:
except ImportError:
from quantumclient.quantum import client
from keystoneclient.v2_0 import client as ksclient
import time
HAVE_DEPS = True
except ImportError:
print "failed=True msg='novaclient, keystone, and quantumclient (or neutronclient) client are required'"
HAVE_DEPS = False
DOCUMENTATION = '''
---
module: quantum_floating_ip_associate
version_added: "1.2"
author: "Benno Joy (@bennojoy)"
short_description: Associate or disassociate a particular floating IP with an instance
description:
- Associates or disassociates a specific floating IP with a particular instance
@ -75,7 +77,11 @@ options:
- floating ip that should be assigned to the instance
required: true
default: None
requirements: ["quantumclient", "neutronclient", "keystoneclient"]
requirements:
- "python >= 2.6"
- "python-novaclient"
- "python-neutronclient or python-quantumclient"
- "python-keystoneclient"
'''
EXAMPLES = '''
@ -186,6 +192,9 @@ def main():
))
module = AnsibleModule(argument_spec=argument_spec)
if not HAVE_DEPS:
module.fail_json(msg='python-novaclient, python-keystoneclient, and either python-neutronclient or python-quantumclient are required')
try:
nova = nova_client.Client(module.params['login_username'], module.params['login_password'],
module.params['login_tenant_name'], module.params['auth_url'], service_type='compute')
@ -214,5 +223,6 @@ def main():
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
main()
if __name__ == '__main__':
main()

@ -22,13 +22,15 @@ try:
except ImportError:
from quantumclient.quantum import client
from keystoneclient.v2_0 import client as ksclient
HAVE_DEPS = True
except ImportError:
print("failed=True msg='quantumclient (or neutronclient) and keystone client are required'")
HAVE_DEPS = False
DOCUMENTATION = '''
---
module: quantum_router
version_added: "1.2"
author: "Benno Joy (@bennojoy)"
short_description: Create or Remove router from OpenStack
description:
- Create or Delete routers from OpenStack
@ -78,7 +80,10 @@ options:
- desired admin state of the created router.
required: false
default: true
requirements: ["quantumclient", "neutronclient", "keystoneclient"]
requirements:
- "python >= 2.6"
- "python-neutronclient or python-quantumclient"
- "python-keystoneclient"
'''
EXAMPLES = '''
@ -183,6 +188,8 @@ def main():
admin_state_up = dict(type='bool', default=True),
))
module = AnsibleModule(argument_spec=argument_spec)
if not HAVE_DEPS:
module.fail_json(msg='python-keystoneclient and either python-neutronclient or python-quantumclient are required')
neutron = _get_neutron_client(module, module.params)
_set_tenant_id(module)
@ -206,5 +213,6 @@ def main():
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
main()
if __name__ == '__main__':
main()

@ -22,12 +22,15 @@ try:
except ImportError:
from quantumclient.quantum import client
from keystoneclient.v2_0 import client as ksclient
HAVE_DEPS = True
except ImportError:
print("failed=True msg='quantumclient (or neutronclient) and keystone client are required'")
HAVE_DEPS = False
DOCUMENTATION = '''
---
module: quantum_router_gateway
version_added: "1.2"
author: "Benno Joy (@bennojoy)"
short_description: set/unset a gateway interface for the router with the specified external network
description:
- Creates/Removes a gateway interface from the router, used to associate an external network with a router to route external traffic.
@ -72,7 +75,10 @@ options:
- Name of the external network which should be attached to the router.
required: true
default: None
requirements: ["quantumclient", "neutronclient", "keystoneclient"]
requirements:
- "python >= 2.6"
- "python-neutronclient or python-quantumclient"
- "python-keystoneclient"
'''
EXAMPLES = '''
@ -181,6 +187,8 @@ def main():
state = dict(default='present', choices=['absent', 'present']),
))
module = AnsibleModule(argument_spec=argument_spec)
if not HAVE_DEPS:
module.fail_json(msg='python-keystoneclient and either python-neutronclient or python-quantumclient are required')
neutron = _get_neutron_client(module, module.params)
router_id = _get_router_id(module, neutron)
@ -209,5 +217,6 @@ def main():
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
main()
if __name__ == '__main__':
main()

@ -22,12 +22,15 @@ try:
except ImportError:
from quantumclient.quantum import client
from keystoneclient.v2_0 import client as ksclient
HAVE_DEPS = True
except ImportError:
print("failed=True msg='quantumclient (or neutronclient) and keystone client are required'")
HAVE_DEPS = False
DOCUMENTATION = '''
---
module: quantum_router_interface
version_added: "1.2"
author: "Benno Joy (@bennojoy)"
short_description: Attach/Detach a subnet's interface to a router
description:
- Attach/Detach a subnet interface to a router, to provide a gateway for the subnet.
@ -77,7 +80,10 @@ options:
- Name of the tenant whose subnet has to be attached.
required: false
default: None
requirements: ["quantumclient", "keystoneclient"]
requirements:
- "python >= 2.6"
- "python-neutronclient or python-quantumclient"
- "python-keystoneclient"
'''
EXAMPLES = '''
@ -216,6 +222,8 @@ def main():
state = dict(default='present', choices=['absent', 'present']),
))
module = AnsibleModule(argument_spec=argument_spec)
if not HAVE_DEPS:
module.fail_json(msg='python-keystoneclient and either python-neutronclient or python-quantumclient are required')
neutron = _get_neutron_client(module, module.params)
_set_tenant_id(module)
@ -245,5 +253,6 @@ def main():
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
main()
if __name__ == '__main__':
main()

@ -182,7 +182,9 @@ options:
description:
- how long before wait gives up, in seconds
default: 300
author: Jesse Keating, Matt Martz
author:
- "Jesse Keating (@j2sol)"
- "Matt Martz (@sivel)"
notes:
- I(exact_count) can be "destructive" if the number of running servers in
the I(group) is larger than that specified in I(count). In such a case, the

@ -79,7 +79,9 @@ options:
description:
- how long before wait gives up, in seconds
default: 300
author: Christopher H. Laco, Matt Martz
author:
- "Christopher H. Laco (@claco)"
- "Matt Martz (@sivel)"
extends_documentation_fragment: rackspace.openstack
'''

@ -58,7 +58,9 @@ options:
description:
- how long before wait gives up, in seconds
default: 300
author: Christopher H. Laco, Matt Martz
author:
- "Christopher H. Laco (@claco)"
- "Matt Martz (@sivel)"
extends_documentation_fragment: rackspace.openstack
'''

@ -52,7 +52,7 @@ options:
description:
- how long before wait gives up, in seconds
default: 300
author: Simon JAILLET
author: "Simon JAILLET (@jails)"
extends_documentation_fragment: rackspace
'''

@ -44,7 +44,7 @@ options:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
author: Simon JAILLET
author: "Simon JAILLET (@jails)"
extends_documentation_fragment: rackspace
'''

@ -51,7 +51,7 @@ options:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
author: Simon JAILLET
author: "Simon JAILLET (@jails)"
extends_documentation_fragment: rackspace
'''

@ -103,7 +103,9 @@ options:
description:
- how long before wait gives up, in seconds
default: 300
author: Christopher H. Laco, Matt Martz
author:
- "Christopher H. Laco (@claco)"
- "Matt Martz (@sivel)"
extends_documentation_fragment: rackspace
'''

@ -85,7 +85,7 @@ options:
required: false
description:
- Weight of node
author: Lukasz Kawczynski
author: "Lukasz Kawczynski (@neuroid)"
extends_documentation_fragment: rackspace
'''

@ -48,7 +48,7 @@ notes:
- "It is recommended that plays utilizing this module be run with
C(serial: 1) to avoid exceeding the API request limit imposed by
the Rackspace CloudDNS API"
author: Matt Martz
author: "Matt Martz (@sivel)"
extends_documentation_fragment: rackspace
'''

@ -84,7 +84,7 @@ notes:
supplied
- As of version 1.7, the C(type) field is required and no longer defaults to an C(A) record.
- C(PTR) record support was added in version 1.7
author: Matt Martz
author: "Matt Martz (@sivel)"
extends_documentation_fragment: rackspace
'''

@ -35,7 +35,7 @@ options:
description:
- Server name to retrieve facts for
default: null
author: Matt Martz
author: "Matt Martz (@sivel)"
extends_documentation_fragment: rackspace.openstack
'''

@ -76,7 +76,7 @@ options:
web_index:
description:
- Sets an object to be presented as the HTTP index page when accessed by the CDN URL
author: Paul Durivage
author: "Paul Durivage (@angstwad)"
extends_documentation_fragment: rackspace
'''

@ -92,7 +92,7 @@ options:
- file
- meta
default: file
author: Paul Durivage
author: "Paul Durivage (@angstwad)"
extends_documentation_fragment: rackspace
'''

@ -29,7 +29,9 @@ options:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
author: Christopher H. Laco, Matt Martz
author:
- "Christopher H. Laco (@claco)"
- "Matt Martz (@sivel)"
extends_documentation_fragment: rackspace.openstack
'''

@ -39,7 +39,7 @@ options:
- present
- absent
default: present
author: Matt Martz
author: "Matt Martz (@sivel)"
notes:
- Keypairs cannot be manipulated, only created and deleted. To "update" a
keypair you must first delete and then recreate it.

@ -39,7 +39,7 @@ options:
description:
- A hash of metadata to associate with the instance
default: null
author: Matt Martz
author: "Matt Martz (@sivel)"
extends_documentation_fragment: rackspace.openstack
'''

@ -39,7 +39,9 @@ options:
description:
- cidr of the network being created
default: null
author: Christopher H. Laco, Jesse Keating
author:
- "Christopher H. Laco (@claco)"
- "Jesse Keating (@j2sol)"
extends_documentation_fragment: rackspace.openstack
'''

@ -35,7 +35,9 @@ options:
- present
- absent
default: present
author: Christopher H. Laco, Matt Martz
author:
- "Christopher H. Laco (@claco)"
- "Matt Martz (@sivel)"
extends_documentation_fragment: rackspace
'''

@ -105,7 +105,7 @@ options:
- Data to be uploaded to the server's config drive. This option implies
I(config_drive). Can be a file path or a string
version_added: 1.8
author: Matt Martz
author: "Matt Martz (@sivel)"
extends_documentation_fragment: rackspace
'''
@ -263,7 +263,7 @@ def rax_asg(module, cooldown=300, disk_config=None, files={}, flavor=None,
lc = sg.get_launch_config()
lc_args = {}
if server_name != lc.get('name'):
lc_args['name'] = server_name
lc_args['server_name'] = server_name
if image != lc.get('image'):
lc_args['image'] = image
@ -273,7 +273,7 @@ def rax_asg(module, cooldown=300, disk_config=None, files={}, flavor=None,
disk_config = disk_config or 'AUTO'
if ((disk_config or lc.get('disk_config')) and
disk_config != lc.get('disk_config')):
disk_config != lc.get('disk_config', 'AUTO')):
lc_args['disk_config'] = disk_config
if (meta or lc.get('meta')) and meta != lc.get('metadata'):
@ -299,7 +299,7 @@ def rax_asg(module, cooldown=300, disk_config=None, files={}, flavor=None,
if key_name != lc.get('key_name'):
lc_args['key_name'] = key_name
if config_drive != lc.get('config_drive'):
if config_drive != lc.get('config_drive', False):
lc_args['config_drive'] = config_drive
if (user_data and

@ -73,7 +73,7 @@ options:
- present
- absent
default: present
author: Matt Martz
author: "Matt Martz (@sivel)"
extends_documentation_fragment: rackspace
'''

@ -119,8 +119,10 @@ options:
notes:
- This module should run from a system that can access vSphere directly.
Either by using local_action, or using delegate_to.
author: Richard Hoop <wrhoop@gmail.com>
requirements: [ pysphere ]
author: "Richard Hoop (@rhoop) <wrhoop@gmail.com>"
requirements:
- "python >= 2.6"
- pysphere
'''
@ -151,11 +153,18 @@ EXAMPLES = '''
type: vmxnet3
network: VM Network
network_type: standard
nic2:
type: vmxnet3
network: dvSwitch Network
network_type: dvs
vm_hardware:
memory_mb: 2048
num_cpus: 2
osid: centos64Guest
scsi: paravirtual
vm_cdrom:
type: "iso"
iso_path: "DatastoreName/cd-image.iso"
esxi:
datacenter: MyDatacenter
hostname: esx001.mydomain.local
@ -408,13 +417,21 @@ def add_nic(module, s, nfmor, config, devices, nic_type="vmxnet3", network_name=
def find_datastore(module, s, datastore, config_target):
# Verify the datastore exists and put it in brackets if it does.
ds = None
for d in config_target.Datastore:
if (d.Datastore.Accessible and
(datastore and d.Datastore.Name == datastore)
or (not datastore)):
ds = d.Datastore.Datastore
datastore = d.Datastore.Name
break
if config_target:
for d in config_target.Datastore:
if (d.Datastore.Accessible and
(datastore and d.Datastore.Name == datastore)
or (not datastore)):
ds = d.Datastore.Datastore
datastore = d.Datastore.Name
break
else:
for ds_mor, ds_name in s.get_datastores().items():
ds_props = VIProperty(s, ds_mor)
if (ds_props.summary.accessible and (datastore and ds_name == datastore)
or (not datastore)):
ds = ds_mor
datastore = ds_name
if not ds:
s.disconnect()
module.fail_json(msg="Datastore: %s does not appear to exist" %
@ -517,22 +534,74 @@ def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, mo
vmTemplate = vsphere_client.get_vm_by_name(template_src)
vmTarget = None
try:
cluster = [k for k,
v in vsphere_client.get_clusters().items() if v == cluster_name][0]
except IndexError, e:
vsphere_client.disconnect()
module.fail_json(msg="Cannot find Cluster named: %s" %
cluster_name)
if esxi:
datacenter = esxi['datacenter']
esxi_hostname = esxi['hostname']
try:
rpmor = [k for k, v in vsphere_client.get_resource_pools(
from_mor=cluster).items()
if v == resource_pool][0]
except IndexError, e:
vsphere_client.disconnect()
module.fail_json(msg="Cannot find Resource Pool named: %s" %
resource_pool)
# Datacenter managed object reference
dclist = [k for k,
v in vsphere_client.get_datacenters().items() if v == datacenter]
if dclist:
dcmor=dclist[0]
else:
vsphere_client.disconnect()
module.fail_json(msg="Cannot find datacenter named: %s" % datacenter)
dcprops = VIProperty(vsphere_client, dcmor)
# hostFolder managed reference
hfmor = dcprops.hostFolder._obj
# Grab the ComputeResource name and host properties
crmors = vsphere_client._retrieve_properties_traversal(
property_names=['name', 'host'],
from_node=hfmor,
obj_type='ComputeResource')
# Grab the host managed object reference of the esxi_hostname
try:
hostmor = [k for k,
v in vsphere_client.get_hosts().items() if v == esxi_hostname][0]
except IndexError, e:
vsphere_client.disconnect()
module.fail_json(msg="Cannot find esx host named: %s" % esxi_hostname)
# Grab the computeResource managed object reference of the host we are
# creating the VM on.
crmor = None
for cr in crmors:
if crmor:
break
for p in cr.PropSet:
if p.Name == "host":
for h in p.Val.get_element_ManagedObjectReference():
if h == hostmor:
crmor = cr.Obj
break
if crmor:
break
crprops = VIProperty(vsphere_client, crmor)
rpmor = crprops.resourcePool._obj
elif resource_pool:
try:
cluster = [k for k,
v in vsphere_client.get_clusters().items() if v == cluster_name][0]
except IndexError, e:
vsphere_client.disconnect()
module.fail_json(msg="Cannot find Cluster named: %s" %
cluster_name)
try:
rpmor = [k for k, v in vsphere_client.get_resource_pools(
from_mor=cluster).items()
if v == resource_pool][0]
except IndexError, e:
vsphere_client.disconnect()
module.fail_json(msg="Cannot find Resource Pool named: %s" %
resource_pool)
else:
module.fail_json(msg="You need to specify either esxi:[datacenter,hostname] or [cluster,resource_pool]")
try:
vmTarget = vsphere_client.get_vm_by_name(guest)
@ -562,13 +631,14 @@ def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool, cluster_name
changes = {}
request = VI.ReconfigVM_TaskRequestMsg()
shutdown = False
poweron = vm.is_powered_on()
memoryHotAddEnabled = bool(vm.properties.config.memoryHotAddEnabled)
cpuHotAddEnabled = bool(vm.properties.config.cpuHotAddEnabled)
cpuHotRemoveEnabled = bool(vm.properties.config.cpuHotRemoveEnabled)
# Change Memory
if vm_hardware['memory_mb']:
if 'memory_mb' in vm_hardware:
if int(vm_hardware['memory_mb']) != vm.properties.config.hardware.memoryMB:
spec = spec_singleton(spec, request, vm)
@ -598,7 +668,7 @@ def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool, cluster_name
changes['memory'] = vm_hardware['memory_mb']
# ====( Config Memory )====#
if vm_hardware['num_cpus']:
if 'num_cpus' in vm_hardware:
if int(vm_hardware['num_cpus']) != vm.properties.config.hardware.numCPU:
spec = spec_singleton(spec, request, vm)
@ -652,7 +722,7 @@ def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool, cluster_name
module.fail_json(
msg="Error reconfiguring vm: %s" % task.get_error_message())
if vm.is_powered_off():
if vm.is_powered_off() and poweron:
try:
vm.power_on(sync_run=True)
except Exception, e:
@ -1319,5 +1389,6 @@ def main():
# this is magic, see lib/ansible/module_common.py
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
main()
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()

@ -81,7 +81,9 @@ notes:
M(command) module is much more secure as it's not affected by the user's
environment.
- " C(creates), C(removes), and C(chdir) can be specified after the command. For instance, if you only want to run a command if a certain file does not exist, use this."
author: Michael DeHaan
author:
- Ansible Core Team
- Michael DeHaan
'''
EXAMPLES = '''
@ -154,12 +156,22 @@ def main():
# the command module is the one ansible module that does not take key=value args
# hence don't copy this one if you are looking to build others!
module = CommandModule(argument_spec=dict())
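# note: _raw_params carries the free-form command string and _uses_shell is
# flipped on when this code backs the shell module; both are internal keys,
# not user-facing options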
module = AnsibleModule(
argument_spec=dict(
_raw_params = dict(),
_uses_shell = dict(type='bool', default=False),
chdir = dict(),
executable = dict(),
creates = dict(),
removes = dict(),
warn = dict(type='bool', default=True),
)
)
shell = module.params['shell']
shell = module.params['_uses_shell']
chdir = module.params['chdir']
executable = module.params['executable']
args = module.params['args']
args = module.params['_raw_params']
creates = module.params['creates']
removes = module.params['removes']
warn = module.params['warn']
@ -168,6 +180,7 @@ def main():
module.fail_json(rc=256, msg="no command given")
if chdir:
chdir = os.path.abspath(os.path.expanduser(chdir))
os.chdir(chdir)
if creates:
@ -232,48 +245,4 @@ def main():
from ansible.module_utils.basic import *
from ansible.module_utils.splitter import *
# only the command module should ever need to do this
# everything else should be simple key=value
class CommandModule(AnsibleModule):
def _handle_aliases(self):
return {}
def _check_invalid_arguments(self):
pass
def _load_params(self):
''' read the input and return a dictionary and the arguments string '''
args = MODULE_ARGS
params = copy.copy(OPTIONS)
params['shell'] = False
if "#USE_SHELL" in args:
args = args.replace("#USE_SHELL", "")
params['shell'] = True
items = split_args(args)
for x in items:
quoted = x.startswith('"') and x.endswith('"') or x.startswith("'") and x.endswith("'")
if '=' in x and not quoted:
# check to see if this is a special parameter for the command
k, v = x.split('=', 1)
v = unquote(v.strip())
if k in OPTIONS.keys():
if k == "chdir":
v = os.path.abspath(os.path.expanduser(v))
if not (os.path.exists(v) and os.path.isdir(v)):
self.fail_json(rc=258, msg="cannot change to directory '%s': path does not exist" % v)
elif k == "executable":
v = os.path.abspath(os.path.expanduser(v))
if not (os.path.exists(v)):
self.fail_json(rc=258, msg="cannot use executable '%s': file does not exist" % v)
params[k] = v
# Remove any of the above k=v params from the args string
args = PARAM_REGEX.sub('', args)
params['args'] = args.strip()
return (params, params['args'])
main()

@ -34,7 +34,9 @@ notes:
playbooks will follow the trend of using M(command) unless M(shell) is
explicitly required. When running ad-hoc commands, use your best
judgement.
author: Michael DeHaan
author:
- Ansible Core Team
- Michael DeHaan
'''
EXAMPLES = '''

@ -32,7 +32,9 @@ options:
version_added: "1.5"
notes:
- It is usually preferable to write Ansible modules rather than push scripts. Convert your script to an Ansible module for bonus points!
author: Michael DeHaan
author:
- Ansible Core Team
- Michael DeHaan
"""
EXAMPLES = '''

@ -57,7 +57,9 @@ notes:
"{{ var | quote }}" instead of just "{{ var }}" to make sure they don't include evil things like semicolons.
requirements: [ ]
author: Michael DeHaan
author:
- Ansible Core Team
- Michael DeHaan
'''
EXAMPLES = '''

@ -30,6 +30,8 @@ options:
name:
description:
- name of the database to add or remove
- C(name=all) may only be provided if I(state) is C(dump) or C(import).
- If C(name=all), works like the C(--all-databases) option of mysqldump (added in 2.0)
required: true
default: null
aliases: [ db ]
@ -77,7 +79,7 @@ options:
target:
description:
- Location, on the remote host, of the dump file to read from or write to. Uncompressed SQL
files (C(.sql)) as well as bzip2 (C(.bz2)) and gzip (C(.gz)) compressed files are supported.
files (C(.sql)) as well as bzip2 (C(.bz2)), gzip (C(.gz)) and xz compressed files are supported.
required: false
notes:
- Requires the MySQLdb Python package on the remote host. For Ubuntu, this
@ -87,7 +89,7 @@ notes:
the credentials from C(~/.my.cnf), and finally fall back to using the MySQL
default login of C(root) with no password.
requirements: [ ConfigParser ]
author: Mark Theunissen
author: "Mark Theunissen (@marktheunissen)"
'''
EXAMPLES = '''
@ -97,12 +99,19 @@ EXAMPLES = '''
# Copy database dump file to remote host and restore it to database 'my_db'
- copy: src=dump.sql.bz2 dest=/tmp
- mysql_db: name=my_db state=import target=/tmp/dump.sql.bz2
# Dumps all databases to hostname.sql
- mysql_db: state=dump name=all target=/tmp/{{ inventory_hostname }}.sql
# Imports the dump file, similar to: mysql -u <username> -p <password> < hostname.sql
- mysql_db: state=import name=all target=/tmp/{{ inventory_hostname }}.sql
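# Dump all databases to an xz-compressed file (a sketch; the xz binary must be
# available on the remote host)
- mysql_db: state=dump name=all target=/tmp/{{ inventory_hostname }}.sql.xz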
'''
import ConfigParser
import os
import pipes
import stat
import subprocess
try:
import MySQLdb
except ImportError:
@ -123,72 +132,76 @@ def db_delete(cursor, db):
cursor.execute(query)
return True
def db_dump(module, host, user, password, db_name, target, port, socket=None):
def db_dump(module, host, user, password, db_name, target, all_databases, port, socket=None):
cmd = module.get_bin_path('mysqldump', True)
cmd += " --quick --user=%s --password=%s" % (pipes.quote(user), pipes.quote(password))
if socket is not None:
cmd += " --socket=%s" % pipes.quote(socket)
else:
cmd += " --host=%s --port=%i" % (pipes.quote(host), port)
cmd += " %s" % pipes.quote(db_name)
if all_databases:
cmd += " --all-databases"
else:
cmd += " %s" % pipes.quote(db_name)
path = None
if os.path.splitext(target)[-1] == '.gz':
cmd = cmd + ' | gzip > ' + pipes.quote(target)
path = module.get_bin_path('gzip', True)
elif os.path.splitext(target)[-1] == '.bz2':
cmd = cmd + ' | bzip2 > ' + pipes.quote(target)
path = module.get_bin_path('bzip2', True)
elif os.path.splitext(target)[-1] == '.xz':
path = module.get_bin_path('xz', True)
if path:
cmd = '%s | %s > %s' % (cmd, path, pipes.quote(target))
else:
cmd += " > %s" % pipes.quote(target)
rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True)
return rc, stdout, stderr
def db_import(module, host, user, password, db_name, target, port, socket=None):
def db_import(module, host, user, password, db_name, target, all_databases, port, socket=None):
if not os.path.exists(target):
return module.fail_json(msg="target %s does not exist on the host" % target)
cmd = module.get_bin_path('mysql', True)
cmd += " --user=%s --password=%s" % (pipes.quote(user), pipes.quote(password))
cmd = [module.get_bin_path('mysql', True)]
if user:
cmd.append("--user=%s" % pipes.quote(user))
if password:
cmd.append("--password=%s" % pipes.quote(password))
if socket is not None:
cmd += " --socket=%s" % pipes.quote(socket)
cmd.append("--socket=%s" % pipes.quote(socket))
else:
cmd += " --host=%s --port=%i" % (pipes.quote(host), port)
cmd += " -D %s" % pipes.quote(db_name)
cmd.append("--host=%s" % pipes.quote(host))
cmd.append("--port=%i" % port)
if not all_databases:
cmd.append("-D")
cmd.append(pipes.quote(db_name))
comp_prog_path = None
if os.path.splitext(target)[-1] == '.gz':
gzip_path = module.get_bin_path('gzip')
if not gzip_path:
module.fail_json(msg="gzip command not found")
#gzip -d file (uncompress)
rc, stdout, stderr = module.run_command('%s -d %s' % (gzip_path, target))
if rc != 0:
return rc, stdout, stderr
#Import sql
cmd += " < %s" % pipes.quote(os.path.splitext(target)[0])
try:
rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True)
if rc != 0:
return rc, stdout, stderr
finally:
#gzip file back up
module.run_command('%s %s' % (gzip_path, os.path.splitext(target)[0]))
comp_prog_path = module.get_bin_path('gzip', required=True)
elif os.path.splitext(target)[-1] == '.bz2':
bzip2_path = module.get_bin_path('bzip2')
if not bzip2_path:
module.fail_json(msg="bzip2 command not found")
#bzip2 -d file (uncompress)
rc, stdout, stderr = module.run_command('%s -d %s' % (bzip2_path, target))
if rc != 0:
return rc, stdout, stderr
#Import sql
cmd += " < %s" % pipes.quote(os.path.splitext(target)[0])
try:
rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True)
if rc != 0:
return rc, stdout, stderr
finally:
#bzip2 file back up
rc, stdout, stderr = module.run_command('%s %s' % (bzip2_path, os.path.splitext(target)[0]))
comp_prog_path = module.get_bin_path('bzip2', required=True)
elif os.path.splitext(target)[-1] == '.xz':
comp_prog_path = module.get_bin_path('xz', required=True)
if comp_prog_path:
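# Stream the decompressed dump straight into the mysql client so the
# file is never decompressed on disk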
p1 = subprocess.Popen([comp_prog_path, '-dc', target], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p2 = subprocess.Popen(cmd, stdin=p1.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout2, stderr2) = p2.communicate()
p1.stdout.close()
p1.wait()
if p1.returncode != 0:
stderr1 = p1.stderr.read()
return p1.returncode, '', stderr1
else:
return p2.returncode, stdout2, stderr2
else:
cmd = ' '.join(cmd)
cmd += " < %s" % pipes.quote(target)
rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True)
return rc, stdout, stderr
return rc, stdout, stderr
def db_create(cursor, db, encoding, collation):
query_params = dict(enc=encoding, collate=collation)
@ -313,8 +326,16 @@ def main():
if state in ['dump','import']:
if target is None:
module.fail_json(msg="with state=%s target is required" % (state))
connect_to_db = db
if db == 'all':
connect_to_db = 'mysql'
db = 'mysql'
all_databases = True
else:
connect_to_db = db
all_databases = False
else:
if db == 'all':
module.fail_json(msg="name is not allowed to equal 'all' unless state equals import, or dump.")
connect_to_db = ''
try:
if socket:
@ -331,11 +352,11 @@ def main():
db_connection = MySQLdb.connect(host=module.params["login_host"], port=login_port, user=login_user, passwd=login_password, db=connect_to_db)
cursor = db_connection.cursor()
except Exception, e:
errno, errstr = e.args
if "Unknown database" in str(e):
errno, errstr = e.args
module.fail_json(msg="ERROR: %s %s" % (errno, errstr))
else:
module.fail_json(msg="unable to connect, check login credentials (login_user, and login_password, which can be defined in ~/.my.cnf), check that mysql socket exists and mysql server is running")
module.fail_json(msg="unable to connect, check login credentials (login_user, and login_password, which can be defined in ~/.my.cnf), check that mysql socket exists and mysql server is running (ERROR: %s %s)" % (errno, errstr))
changed = False
if db_exists(cursor, db):
@ -346,7 +367,7 @@ def main():
module.fail_json(msg="error deleting database: " + str(e))
elif state == "dump":
rc, stdout, stderr = db_dump(module, login_host, login_user,
login_password, db, target,
login_password, db, target, all_databases,
port=login_port,
socket=module.params['login_unix_socket'])
if rc != 0:
@ -355,7 +376,7 @@ def main():
module.exit_json(changed=True, db=db, msg=stdout)
elif state == "import":
rc, stdout, stderr = db_import(module, login_host, login_user,
login_password, db, target,
login_password, db, target, all_databases,
port=login_port,
socket=module.params['login_unix_socket'])
if rc != 0:

@ -30,7 +30,6 @@ options:
description:
- name of the user (role) to add or remove
required: true
default: null
password:
description:
- set the user's password
@ -91,7 +90,8 @@ options:
description:
- Check if mysql allows login as root/nopassword before trying supplied credentials.
required: false
default: false
choices: [ "yes", "no" ]
default: "no"
version_added: "1.3"
update_password:
required: false
@ -100,7 +100,12 @@ options:
version_added: "2.0"
description:
- C(always) will update passwords if they differ. C(on_create) will only set the password for newly created users.
config_file:
description:
- Specify a config file from which user and password are to be read
required: false
default: '~/.my.cnf'
version_added: "2.0"
notes:
- Requires the MySQLdb Python package on the remote host. For Ubuntu, this
is as easy as apt-get install python-mysqldb.
@ -114,8 +119,8 @@ notes:
the new root credentials. Subsequent runs of the playbook will then succeed by reading the new credentials from
the file."
requirements: [ "ConfigParser", "MySQLdb" ]
author: Mark Theunissen
requirements: [ "MySQLdb" ]
author: "Mark Theunissen (@marktheunissen)"
'''
EXAMPLES = """
@ -125,7 +130,7 @@ EXAMPLES = """
# Creates database user 'bob' and password '12345' with all database privileges and 'WITH GRANT OPTION'
- mysql_user: name=bob password=12345 priv=*.*:ALL,GRANT state=present
# Modifiy user Bob to require SSL connections. Note that REQUIRESSL is a special privilege that should only apply to *.* by itself.
# Modify user Bob to require SSL connections. Note that REQUIRESSL is a special privilege that should only apply to *.* by itself.
- mysql_user: name=bob append_privs=true priv=*.*:REQUIRESSL state=present
# Ensure no user named 'sally' exists, also passing in the auth credentials.
@ -144,15 +149,12 @@ mydb.*:INSERT,UPDATE/anotherdb.*:SELECT/yetanotherdb.*:ALL
- mysql_user: name=root password=abc123 login_unix_socket=/var/run/mysqld/mysqld.sock
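# Read the login credentials from a non-default config file
# (config_file was added in 2.0; the path below is illustrative)
- mysql_user: name=bob password=12345 priv=*.*:ALL state=present config_file=/home/deploy/.my.cnf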
# Example .my.cnf file for setting the root password
# Note: don't use quotes around the password, because the mysql_user module
# will include them in the password but the mysql client will not
[client]
user=root
password=n<_665{vS43y
"""
import ConfigParser
import getpass
import tempfile
try:
@ -179,6 +181,30 @@ class InvalidPrivsError(Exception):
# MySQL module specific support methods.
#
def connect(module, login_user=None, login_password=None, config_file=''):
config = {
'host': module.params['login_host'],
'db': 'mysql'
}
if module.params['login_unix_socket']:
config['unix_socket'] = module.params['login_unix_socket']
else:
config['port'] = module.params['login_port']
if os.path.exists(config_file):
config['read_default_file'] = config_file
# If login_user or login_password are given, they should override the
# config file
if login_user is not None:
config['user'] = login_user
if login_password is not None:
config['passwd'] = login_password
db_connection = MySQLdb.connect(**config)
return db_connection.cursor()
def user_exists(cursor, user, host):
cursor.execute("SELECT count(*) FROM user WHERE user = %s AND host = %s", (user,host))
count = cursor.fetchone()
@ -217,7 +243,7 @@ def user_mod(cursor, user, host, password, new_priv, append_privs):
grant_option = True
if db_table not in new_priv:
if user != "root" and "PROXY" not in priv and not append_privs:
privileges_revoke(cursor, user,host,db_table,grant_option)
privileges_revoke(cursor, user,host,db_table,priv,grant_option)
changed = True
# If the user doesn't currently have any privileges on a db.table, then
@ -234,7 +260,7 @@ def user_mod(cursor, user, host, password, new_priv, append_privs):
priv_diff = set(new_priv[db_table]) ^ set(curr_priv[db_table])
if (len(priv_diff) > 0):
if not append_privs:
privileges_revoke(cursor, user,host,db_table,grant_option)
privileges_revoke(cursor, user,host,db_table,curr_priv[db_table],grant_option)
privileges_grant(cursor, user,host,db_table,new_priv[db_table])
changed = True
@ -292,14 +318,10 @@ def privileges_unpack(priv):
output = {}
for item in priv.strip().split('/'):
pieces = item.strip().split(':')
if '.' in pieces[0]:
pieces[0] = pieces[0].split('.')
for idx, piece in enumerate(pieces):
if pieces[0][idx] != "*":
pieces[0][idx] = "`" + pieces[0][idx] + "`"
pieces[0] = '.'.join(pieces[0])
output[pieces[0]] = pieces[1].upper().split(',')
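# Backtick-quote only the database part (everything before the last '.'),
# leaving the table part as-is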
dbpriv = pieces[0].rsplit(".", 1)
pieces[0] = "`%s`.%s" % (dbpriv[0].strip('`'), dbpriv[1])
output[pieces[0]] = [s.strip() for s in pieces[1].upper().split(',')]
new_privs = frozenset(output[pieces[0]])
if not new_privs.issubset(VALID_PRIVS):
raise InvalidPrivsError('Invalid privileges specified: %s' % new_privs.difference(VALID_PRIVS))
@ -314,7 +336,7 @@ def privileges_unpack(priv):
return output
def privileges_revoke(cursor, user,host,db_table,grant_option):
def privileges_revoke(cursor, user,host,db_table,priv,grant_option):
# Escape '%' since mysql db.execute() uses a format string
db_table = db_table.replace('%', '%%')
if grant_option:
@ -322,7 +344,8 @@ def privileges_revoke(cursor, user,host,db_table,grant_option):
query.append("FROM %s@%s")
query = ' '.join(query)
cursor.execute(query, (user, host))
query = ["REVOKE ALL PRIVILEGES ON %s" % mysql_quote_identifier(db_table, 'table')]
priv_string = ",".join([p for p in priv if p not in ('GRANT', 'REQUIRESSL')])
query = ["REVOKE %s ON %s" % (priv_string, mysql_quote_identifier(db_table, 'table'))]
query.append("FROM %s@%s")
query = ' '.join(query)
cursor.execute(query, (user, host))
@ -331,7 +354,7 @@ def privileges_grant(cursor, user,host,db_table,priv):
# Escape '%' since mysql db.execute uses a format string and the
# specification of db and table often use a % (SQL wildcard)
db_table = db_table.replace('%', '%%')
priv_string = ",".join(filter(lambda x: x not in [ 'GRANT', 'REQUIRESSL' ], priv))
priv_string = ",".join([p for p in priv if p not in ('GRANT', 'REQUIRESSL')])
query = ["GRANT %s ON %s" % (priv_string, mysql_quote_identifier(db_table, 'table'))]
query.append("TO %s@%s")
if 'GRANT' in priv:
@ -341,100 +364,6 @@ def privileges_grant(cursor, user,host,db_table,priv):
query = ' '.join(query)
cursor.execute(query, (user, host))
def strip_quotes(s):
""" Remove surrounding single or double quotes
>>> print strip_quotes('hello')
hello
>>> print strip_quotes('"hello"')
hello
>>> print strip_quotes("'hello'")
hello
>>> print strip_quotes("'hello")
'hello
"""
single_quote = "'"
double_quote = '"'
if s.startswith(single_quote) and s.endswith(single_quote):
s = s.strip(single_quote)
elif s.startswith(double_quote) and s.endswith(double_quote):
s = s.strip(double_quote)
return s
def config_get(config, section, option):
""" Calls ConfigParser.get and strips quotes
See: http://dev.mysql.com/doc/refman/5.0/en/option-files.html
"""
return strip_quotes(config.get(section, option))
def _safe_cnf_load(config, path):
data = {'user':'', 'password':''}
# read in user/pass
f = open(path, 'r')
for line in f.readlines():
line = line.strip()
if line.startswith('user='):
data['user'] = line.split('=', 1)[1].strip()
if line.startswith('password=') or line.startswith('pass='):
data['password'] = line.split('=', 1)[1].strip()
f.close()
# write out a new cnf file with only user/pass
fh, newpath = tempfile.mkstemp(prefix=path + '.')
f = open(newpath, 'wb')
f.write('[client]\n')
f.write('user=%s\n' % data['user'])
f.write('password=%s\n' % data['password'])
f.close()
config.readfp(open(newpath))
os.remove(newpath)
return config
def load_mycnf():
config = ConfigParser.RawConfigParser()
mycnf = os.path.expanduser('~/.my.cnf')
if not os.path.exists(mycnf):
return False
try:
config.readfp(open(mycnf))
except (IOError):
return False
except:
config = _safe_cnf_load(config, mycnf)
# We support two forms of passwords in .my.cnf, both pass= and password=,
# as these are both supported by MySQL.
try:
passwd = config_get(config, 'client', 'password')
except (ConfigParser.NoOptionError):
try:
passwd = config_get(config, 'client', 'pass')
except (ConfigParser.NoOptionError):
return False
# If .my.cnf doesn't specify a user, default to user login name
try:
user = config_get(config, 'client', 'user')
except (ConfigParser.NoOptionError):
user = getpass.getuser()
creds = dict(user=user,passwd=passwd)
return creds
def connect(module, login_user, login_password):
if module.params["login_unix_socket"]:
db_connection = MySQLdb.connect(host=module.params["login_host"], unix_socket=module.params["login_unix_socket"], user=login_user, passwd=login_password, db="mysql")
else:
db_connection = MySQLdb.connect(host=module.params["login_host"], port=module.params["login_port"], user=login_user, passwd=login_password, db="mysql")
return db_connection.cursor()
# ===========================================
# Module execution.
#
@ -448,24 +377,29 @@ def main():
login_port=dict(default=3306, type='int'),
login_unix_socket=dict(default=None),
user=dict(required=True, aliases=['name']),
password=dict(default=None),
password=dict(default=None, no_log=True),
host=dict(default="localhost"),
state=dict(default="present", choices=["absent", "present"]),
priv=dict(default=None),
append_privs=dict(type="bool", default="no"),
check_implicit_admin=dict(default=False),
append_privs=dict(default=False, type='bool'),
check_implicit_admin=dict(default=False, type='bool'),
update_password=dict(default="always", choices=["always", "on_create"]),
config_file=dict(default="~/.my.cnf"),
)
)
login_user = module.params["login_user"]
login_password = module.params["login_password"]
user = module.params["user"]
password = module.params["password"]
host = module.params["host"]
host = module.params["host"].lower()
state = module.params["state"]
priv = module.params["priv"]
check_implicit_admin = module.params['check_implicit_admin']
config_file = module.params['config_file']
append_privs = module.boolean(module.params["append_privs"])
update_password = module.params['update_password']
config_file = os.path.expanduser(os.path.expandvars(config_file))
if not mysqldb_found:
module.fail_json(msg="the python mysqldb module is required")
@ -475,34 +409,18 @@ def main():
except Exception, e:
module.fail_json(msg="invalid privileges string: %s" % str(e))
# Either the caller passes both a username and password with which to connect to
# mysql, or they pass neither and allow this module to read the credentials from
# ~/.my.cnf.
login_password = module.params["login_password"]
login_user = module.params["login_user"]
if login_user is None and login_password is None:
mycnf_creds = load_mycnf()
if mycnf_creds is False:
login_user = "root"
login_password = ""
else:
login_user = mycnf_creds["user"]
login_password = mycnf_creds["passwd"]
elif login_password is None or login_user is None:
module.fail_json(msg="when supplying login arguments, both login_user and login_password must be provided")
cursor = None
try:
if check_implicit_admin:
try:
cursor = connect(module, 'root', '')
cursor = connect(module, 'root', '', config_file)
except:
pass
if not cursor:
cursor = connect(module, login_user, login_password)
cursor = connect(module, login_user, login_password, config_file)
except Exception, e:
module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or ~/.my.cnf has the credentials")
module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or ~/.my.cnf has the credentials. Exception message: %s" % e)
if state == "present":
if user_exists(cursor, user, host):

@ -30,6 +30,7 @@ short_description: Manage MySQL global variables
description:
- Query / Set MySQL variables
version_added: 1.3
author: "Balazs Pocze (@banyek)"
options:
variable:
description:

@ -95,7 +95,7 @@ notes:
- This module uses I(psycopg2), a Python PostgreSQL database adapter. You must ensure that psycopg2 is installed on
the host before using this module. If the remote host is the PostgreSQL server (which is the default case), then PostgreSQL must also be installed on the remote host. For Ubuntu-based systems, install the C(postgresql), C(libpq-dev), and C(python-psycopg2) packages on the remote host before using this module.
requirements: [ psycopg2 ]
author: Lorin Hochstein
author: "Lorin Hochstein (@lorin)"
'''
EXAMPLES = '''

@ -136,7 +136,7 @@ notes:
another user also, R can still access database objects via these privileges.
- When revoking privileges, C(RESTRICT) is assumed (see PostgreSQL docs).
requirements: [psycopg2]
author: Bernhard Weitzhofer
author: "Bernhard Weitzhofer (@b6d)"
"""
EXAMPLES = """

@ -44,7 +44,7 @@ options:
password:
description:
- set the user's password, before 1.4 this was required.
- "When passing an encrypted password, the encrypted parameter must also be true, and it must be generated with the format C('str[\\"md5\\"] + md5[ password + username ]'), resulting in a total of 35 characters. An easy way to do this is: C(echo \\"md5`echo -n \\"verysecretpasswordJOE\\" | md5`\\")."
- "When passing an encrypted password, the encrypted parameter must also be true, and it must be generated with the format C('str[\\"md5\\"] + md5[ password + username ]'), resulting in a total of 35 characters. An easy way to do this is: C(echo \\"md5`echo -n \\"verysecretpasswordJOE\\" | md5`\\"). Note that if encrypted is set, the stored password will be hashed whether or not it is pre-encrypted."
required: false
default: null
db:
@ -103,7 +103,7 @@ options:
choices: [ "present", "absent" ]
encrypted:
description:
- denotes if the password is already encrypted. boolean.
- whether the password is stored hashed in the database. boolean. Passwords can be passed already hashed or unhashed, and postgresql ensures the stored password is hashed when encrypted is set.
required: false
default: false
version_added: '1.4'
@ -129,11 +129,15 @@ notes:
PostgreSQL must also be installed on the remote host. For Ubuntu-based
systems, install the postgresql, libpq-dev, and python-psycopg2 packages
on the remote host before using this module.
- If the passlib library is installed, then passwords that are encrypted
in the DB but not encrypted when passed as arguments can be checked for
changes. If the passlib library is not installed, a plain-text password that
is stored encrypted in the DB will always be assumed to have changed (a short
passlib sketch follows this hunk).
- If you specify PUBLIC as the user, then the privilege changes will apply
to all users. You may not specify password or role_attr_flags when the
PUBLIC user is specified.
requirements: [ psycopg2 ]
author: Lorin Hochstein
author: "Lorin Hochstein (@lorin)"
'''
EXAMPLES = '''
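The passlib-based comparison described in the notes above works roughly like this; a hedged sketch (passlib >= 1.7 spells the call .hash, older releases use .encrypt as the hunk further down does; the role name is passed as the user keyword):

from passlib.hash import postgres_md5

stored = postgres_md5.hash('verysecretpassword', user='JOE')  # what rolpassword would hold
pwchanging = postgres_md5.hash('verysecretpassword', user='JOE') != stored
print(pwchanging)  # False: the supplied password matches, so no ALTER USER is needed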
@ -161,6 +165,7 @@ import itertools
try:
import psycopg2
import psycopg2.extras
except ImportError:
postgresqldb_found = False
else:
@ -169,10 +174,16 @@ else:
_flags = ('SUPERUSER', 'CREATEROLE', 'CREATEUSER', 'CREATEDB', 'INHERIT', 'LOGIN', 'REPLICATION')
VALID_FLAGS = frozenset(itertools.chain(_flags, ('NO%s' % f for f in _flags)))
VALID_PRIVS = dict(table=frozenset(('SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE', 'REFERENCES', 'TRIGGER', 'ALL', 'USAGE')),
database=frozenset(('CREATE', 'CONNECT', 'TEMPORARY', 'TEMP', 'ALL', 'USAGE')),
VALID_PRIVS = dict(table=frozenset(('SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE', 'REFERENCES', 'TRIGGER', 'ALL')),
database=frozenset(('CREATE', 'CONNECT', 'TEMPORARY', 'TEMP', 'ALL')),
)
# map to cope with idiosyncrasies of SUPERUSER and LOGIN
PRIV_TO_AUTHID_COLUMN = dict(SUPERUSER='rolsuper', CREATEROLE='rolcreaterole',
CREATEUSER='rolcreateuser', CREATEDB='rolcreatedb',
INHERIT='rolinherit', LOGIN='rolcanlogin',
REPLICATION='rolreplication')
class InvalidFlagsError(Exception):
pass
@ -230,8 +241,45 @@ def user_alter(cursor, module, user, password, role_attr_flags, encrypted, expir
# Grab current role attributes.
current_role_attrs = cursor.fetchone()
alter = ['ALTER USER %(user)s' % {"user": pg_quote_identifier(user, 'role')}]
# Do we actually need to do anything?
pwchanging = False
if password is not None:
if encrypted:
if password.startswith('md5'):
if password != current_role_attrs['rolpassword']:
pwchanging = True
else:
try:
from passlib.hash import postgres_md5 as pm
if pm.encrypt(password, user) != current_role_attrs['rolpassword']:
pwchanging = True
except ImportError:
# Cannot check if passlib is not installed, so assume password is different
pwchanging = True
else:
if password != current_role_attrs['rolpassword']:
pwchanging = True
role_attr_flags_changing = False
if role_attr_flags:
role_attr_flags_dict = {}
for r in role_attr_flags.split(' '):
if r.startswith('NO'):
role_attr_flags_dict[r.replace('NO', '', 1)] = False
else:
role_attr_flags_dict[r] = True
for role_attr_name, role_attr_value in role_attr_flags_dict.items():
if current_role_attrs[PRIV_TO_AUTHID_COLUMN[role_attr_name]] != role_attr_value:
role_attr_flags_changing = True
    expires_changing = (expires is not None and expires != current_role_attrs['rolvaliduntil'])  # pg_authid column is rolvaliduntil
if not pwchanging and not role_attr_flags_changing and not expires_changing:
return False
alter = ['ALTER USER %(user)s' % {"user": pg_quote_identifier(user, 'role')}]
if pwchanging:
alter.append("WITH %(crypt)s" % {"crypt": encrypted})
alter.append("PASSWORD %(password)s")
alter.append(role_attr_flags)
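A minimal sketch of the flag-change detection in the hunk above, with a made-up current_role_attrs row (the real values come from the DictCursor query):

PRIV_TO_AUTHID_COLUMN = dict(SUPERUSER='rolsuper', LOGIN='rolcanlogin')  # subset of the map above
current_role_attrs = {'rolsuper': False, 'rolcanlogin': True}            # hypothetical row

role_attr_flags_changing = False
for r in 'SUPERUSER NOLOGIN'.split(' '):
    # 'NOLOGIN' -> ('LOGIN', False); 'SUPERUSER' -> ('SUPERUSER', True)
    name, value = (r.replace('NO', '', 1), False) if r.startswith('NO') else (r, True)
    if current_role_attrs[PRIV_TO_AUTHID_COLUMN[name]] != value:
        role_attr_flags_changing = True
print(role_attr_flags_changing)  # True: both requested flags differ from the row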
@ -276,10 +324,21 @@ def user_delete(cursor, user):
cursor.execute("RELEASE SAVEPOINT ansible_pgsql_user_delete")
return True
def has_table_privilege(cursor, user, table, priv):
query = 'SELECT has_table_privilege(%s, %s, %s)'
cursor.execute(query, (user, table, priv))
return cursor.fetchone()[0]
def has_table_privileges(cursor, user, table, privs):
"""
Return the difference between the privileges that a user already has and
the privileges that they desire to have.
:returns: tuple of:
* privileges that they have and were requested
* privileges they currently hold but were not requested
* privileges requested that they do not hold
"""
cur_privs = get_table_privileges(cursor, user, table)
have_currently = cur_privs.intersection(privs)
other_current = cur_privs.difference(privs)
desired = privs.difference(cur_privs)
return (have_currently, other_current, desired)
def get_table_privileges(cursor, user, table):
if '.' in table:
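The tuple returned by has_table_privileges (and its database counterpart below) is just three set operations; a tiny sketch with hypothetical values:

cur_privs = frozenset(['SELECT', 'INSERT'])     # what the database reports
privs = frozenset(['SELECT', 'UPDATE'])         # what the task requests

have_currently = cur_privs.intersection(privs)  # frozenset({'SELECT'}): requested and held
other_current = cur_privs.difference(privs)     # frozenset({'INSERT'}): held but not requested
desired = privs.difference(cur_privs)           # frozenset({'UPDATE'}): requested but missing
print(have_currently, other_current, desired)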
@ -289,26 +348,21 @@ def get_table_privileges(cursor, user, table):
query = '''SELECT privilege_type FROM information_schema.role_table_grants
WHERE grantee=%s AND table_name=%s AND table_schema=%s'''
cursor.execute(query, (user, table, schema))
return set([x[0] for x in cursor.fetchall()])
return frozenset([x[0] for x in cursor.fetchall()])
def grant_table_privilege(cursor, user, table, priv):
def grant_table_privileges(cursor, user, table, privs):
# Note: priv escaped by parse_privs
prev_priv = get_table_privileges(cursor, user, table)
privs = ', '.join(privs)
query = 'GRANT %s ON TABLE %s TO %s' % (
priv, pg_quote_identifier(table, 'table'), pg_quote_identifier(user, 'role') )
privs, pg_quote_identifier(table, 'table'), pg_quote_identifier(user, 'role') )
cursor.execute(query)
curr_priv = get_table_privileges(cursor, user, table)
return len(curr_priv) > len(prev_priv)
def revoke_table_privilege(cursor, user, table, priv):
def revoke_table_privileges(cursor, user, table, privs):
# Note: priv escaped by parse_privs
prev_priv = get_table_privileges(cursor, user, table)
privs = ', '.join(privs)
query = 'REVOKE %s ON TABLE %s FROM %s' % (
priv, pg_quote_identifier(table, 'table'), pg_quote_identifier(user, 'role') )
privs, pg_quote_identifier(table, 'table'), pg_quote_identifier(user, 'role') )
cursor.execute(query)
curr_priv = get_table_privileges(cursor, user, table)
return len(curr_priv) < len(prev_priv)
def get_database_privileges(cursor, user, db):
priv_map = {
@ -320,80 +374,92 @@ def get_database_privileges(cursor, user, db):
cursor.execute(query, (db,))
datacl = cursor.fetchone()[0]
if datacl is None:
return []
return set()
r = re.search('%s=(C?T?c?)/[a-z]+\,?' % user, datacl)
if r is None:
return []
o = []
return set()
o = set()
for v in r.group(1):
o.append(priv_map[v])
return o
o.add(priv_map[v])
return normalize_privileges(o, 'database')
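The regular expression in get_database_privileges pulls the single-letter grants for one role out of a pg_database.datacl entry such as joe=CTc/postgres; a sketch with a hypothetical ACL string:

import re

priv_map = {'C': 'CREATE', 'T': 'TEMPORARY', 'c': 'CONNECT'}  # same map as the module
datacl = '{=Tc/postgres,joe=CTc/postgres}'                    # hypothetical datacl value

r = re.search(r'%s=(C?T?c?)/[a-z]+,?' % 'joe', datacl)
privs = set(priv_map[v] for v in r.group(1))
print(privs)  # {'CREATE', 'TEMPORARY', 'CONNECT'}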
def has_database_privilege(cursor, user, db, priv):
query = 'SELECT has_database_privilege(%s, %s, %s)'
cursor.execute(query, (user, db, priv))
return cursor.fetchone()[0]
def has_database_privileges(cursor, user, db, privs):
"""
Return the difference between the privileges that a user already has and
the privileges that they desire to have.
:returns: tuple of:
* privileges that they have and were requested
* privileges they currently hold but were not requested
* privileges requested that they do not hold
"""
cur_privs = get_database_privileges(cursor, user, db)
have_currently = cur_privs.intersection(privs)
other_current = cur_privs.difference(privs)
desired = privs.difference(cur_privs)
return (have_currently, other_current, desired)
def grant_database_privilege(cursor, user, db, priv):
def grant_database_privileges(cursor, user, db, privs):
# Note: priv escaped by parse_privs
prev_priv = get_database_privileges(cursor, user, db)
    privs = ', '.join(privs)
if user == "PUBLIC":
query = 'GRANT %s ON DATABASE %s TO PUBLIC' % (
priv, pg_quote_identifier(db, 'database'))
privs, pg_quote_identifier(db, 'database'))
else:
query = 'GRANT %s ON DATABASE %s TO %s' % (
priv, pg_quote_identifier(db, 'database'),
privs, pg_quote_identifier(db, 'database'),
pg_quote_identifier(user, 'role'))
cursor.execute(query)
curr_priv = get_database_privileges(cursor, user, db)
return len(curr_priv) > len(prev_priv)
def revoke_database_privilege(cursor, user, db, priv):
def revoke_database_privileges(cursor, user, db, privs):
# Note: priv escaped by parse_privs
prev_priv = get_database_privileges(cursor, user, db)
privs = ', '.join(privs)
if user == "PUBLIC":
query = 'REVOKE %s ON DATABASE %s FROM PUBLIC' % (
priv, pg_quote_identifier(db, 'database'))
privs, pg_quote_identifier(db, 'database'))
else:
query = 'REVOKE %s ON DATABASE %s FROM %s' % (
priv, pg_quote_identifier(db, 'database'),
privs, pg_quote_identifier(db, 'database'),
pg_quote_identifier(user, 'role'))
cursor.execute(query)
curr_priv = get_database_privileges(cursor, user, db)
return len(curr_priv) < len(prev_priv)
def revoke_privileges(cursor, user, privs):
if privs is None:
return False
revoke_funcs = dict(table=revoke_table_privileges, database=revoke_database_privileges)
check_funcs = dict(table=has_table_privileges, database=has_database_privileges)
changed = False
for type_ in privs:
revoke_func = {
'table':revoke_table_privilege,
'database':revoke_database_privilege
}[type_]
for name, privileges in privs[type_].iteritems():
for privilege in privileges:
changed = revoke_func(cursor, user, name, privilege)\
or changed
# Check that any of the privileges requested to be removed are
# currently granted to the user
differences = check_funcs[type_](cursor, user, name, privileges)
if differences[0]:
revoke_funcs[type_](cursor, user, name, privileges)
changed = True
return changed
def grant_privileges(cursor, user, privs):
if privs is None:
return False
grant_funcs = dict(table=grant_table_privileges, database=grant_database_privileges)
check_funcs = dict(table=has_table_privileges, database=has_database_privileges)
changed = False
for type_ in privs:
grant_func = {
'table':grant_table_privilege,
'database':grant_database_privilege
}[type_]
for name, privileges in privs[type_].iteritems():
for privilege in privileges:
changed = grant_func(cursor, user, name, privilege)\
or changed
# Check that any of the privileges requested for the user are
# currently missing
differences = check_funcs[type_](cursor, user, name, privileges)
if differences[2]:
grant_funcs[type_](cursor, user, name, privileges)
changed = True
return changed
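Both loops above follow the same check-then-apply shape; a condensed sketch of the grant side (grant_if_missing and the callables it takes are stand-ins for the module's functions):

def grant_if_missing(cursor, user, name, privileges, check_func, grant_func):
    # differences = (held-and-requested, held-but-not-requested, requested-but-missing)
    differences = check_func(cursor, user, name, privileges)
    if differences[2]:
        grant_func(cursor, user, name, privileges)
        return True   # something was granted -> changed
    return False      # already satisfied -> idempotent no-op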
def parse_role_attrs(role_attr_flags):
@ -422,6 +488,17 @@ def parse_role_attrs(role_attr_flags):
o_flags = ' '.join(flag_set)
return o_flags
def normalize_privileges(privs, type_):
new_privs = set(privs)
    # membership is tested on new_privs so a TEMP added by expanding ALL is canonicalized too
    if 'ALL' in new_privs:
        new_privs.update(VALID_PRIVS[type_])
        new_privs.remove('ALL')
    if 'TEMP' in new_privs:
        new_privs.add('TEMPORARY')
        new_privs.remove('TEMP')
return new_privs
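With the membership tests on new_privs as fixed above, both spellings of a database grant collapse to the same canonical set; a quick check, reusing normalize_privileges and the VALID_PRIVS table defined earlier in this diff:

print(normalize_privileges(set(['ALL']), 'database') ==
      normalize_privileges(set(['CREATE', 'CONNECT', 'TEMP']), 'database'))  # True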
def parse_privs(privs, db):
"""
Parse privilege string to determine permissions for database db.
@ -454,6 +531,8 @@ def parse_privs(privs, db):
if not priv_set.issubset(VALID_PRIVS[type_]):
raise InvalidPrivsError('Invalid privs specified for %s: %s' %
(type_, ' '.join(priv_set.difference(VALID_PRIVS[type_]))))
priv_set = normalize_privileges(priv_set, type_)
o_privs[type_][name] = priv_set
return o_privs
@ -527,7 +606,7 @@ def main():
try:
db_connection = psycopg2.connect(**kw)
cursor = db_connection.cursor()
cursor = db_connection.cursor(cursor_factory=psycopg2.extras.DictCursor)
except Exception, e:
module.fail_json(msg="unable to connect to database: %s" % e)

@ -79,7 +79,7 @@ options:
description:
- DEPRECATED. The acl to set or remove. This must always be quoted in the form of '<etype>:<qualifier>:<perms>'. The qualifier may be empty for some types, but the type and perms are always required. '-' can be used as placeholder when you do not care about permissions. This is now superseded by entity, type and permissions fields.
author: Brian Coca
author: "Brian Coca (@bcoca)"
notes:
- The "acl" module requires that acls are enabled on the target filesystem and that the setfacl and getfacl binaries are installed.
'''
