Remove things in core, leaving only extras

reviewable/pr18780/r1
Michael DeHaan 10 years ago
parent ab4977e909
commit c1067e329a

@@ -1,484 +0,0 @@
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: azure
short_description: create or terminate a virtual machine in azure
description:
- Creates or terminates azure instances. When creating, optionally waits for the instance to be 'running'. This module has a dependency on python-azure >= 0.7.1
version_added: "1.7"
options:
name:
description:
- name of the virtual machine and associated cloud service.
required: true
default: null
location:
description:
- the azure location to use (e.g. 'East US')
required: true
default: null
subscription_id:
description:
- azure subscription id. Overrides the AZURE_SUBSCRIPTION_ID environment variable.
required: false
default: null
management_cert_path:
description:
- path to an azure management certificate associated with the subscription id. Overrides the AZURE_CERT_PATH environment variable.
required: false
default: null
storage_account:
description:
- the azure storage account in which to store the data disks.
required: true
image:
description:
- system image for creating the virtual machine (e.g., b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu_DAILY_BUILD-precise-12_04_3-LTS-amd64-server-20131205-en-us-30GB)
required: true
default: null
role_size:
description:
- azure role size for the new virtual machine (e.g., Small, ExtraLarge, A6)
required: false
default: Small
endpoints:
description:
- a comma-separated list of TCP ports to expose on the virtual machine (e.g., "22,80")
required: false
default: 22
user:
description:
- the unix username for the new virtual machine.
required: false
default: null
password:
description:
- the unix password for the new virtual machine.
required: false
default: null
ssh_cert_path:
description:
- path to an X509 certificate containing the public ssh key to install in the virtual machine. See http://www.windowsazure.com/en-us/manage/linux/tutorials/intro-to-linux/ for more details.
- if this option is specified, password-based ssh authentication will be disabled.
required: false
default: null
virtual_network_name:
description:
- Name of virtual network.
required: false
default: null
hostname:
description:
- hostname to write to /etc/hostname. Defaults to <name>.cloudapp.net.
required: false
default: null
wait:
description:
- wait for the instance to be in state 'running' before returning
required: false
default: "no"
choices: [ "yes", "no" ]
aliases: []
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 600
aliases: []
wait_timeout_redirects:
description:
- how long before wait gives up for redirects, in seconds
default: 300
aliases: []
state:
description:
- create or terminate instances
required: false
default: 'present'
aliases: []
requirements: [ "azure" ]
author: John Whitbeck
'''
EXAMPLES = '''
# Note: None of these examples set subscription_id or management_cert_path
# It is assumed that their matching environment variables are set.
# Provision virtual machine example
- local_action:
module: azure
name: my-virtual-machine
role_size: Small
image: b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu_DAILY_BUILD-precise-12_04_3-LTS-amd64-server-20131205-en-us-30GB
location: 'East US'
user: ubuntu
ssh_cert_path: /path/to/azure_x509_cert.pem
storage_account: my-storage-account
wait: yes
# Terminate virtual machine example
- local_action:
module: azure
name: my-virtual-machine
state: absent
'''
import base64
import datetime
import os
import sys
import time
from urlparse import urlparse
AZURE_LOCATIONS = ['South Central US',
'Central US',
'East US 2',
'East US',
'West US',
'North Central US',
'North Europe',
'West Europe',
'East Asia',
'Southeast Asia',
'Japan West',
'Japan East',
'Brazil South']
AZURE_ROLE_SIZES = ['ExtraSmall',
'Small',
'Medium',
'Large',
'ExtraLarge',
'A5',
'A6',
'A7',
'A8',
'A9',
'Basic_A0',
'Basic_A1',
'Basic_A2',
'Basic_A3',
'Basic_A4']
try:
import azure as windows_azure
from azure import WindowsAzureError, WindowsAzureMissingResourceError
from azure.servicemanagement import (ServiceManagementService, OSVirtualHardDisk, SSH, PublicKeys,
PublicKey, LinuxConfigurationSet, ConfigurationSetInputEndpoints,
ConfigurationSetInputEndpoint)
except ImportError:
print "failed=True msg='azure required for this module'"
sys.exit(1)
from distutils.version import LooseVersion
from types import MethodType
import json
def _wait_for_completion(azure, promise, wait_timeout, msg):
if not promise: return
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time():
operation_result = azure.get_operation_status(promise.request_id)
time.sleep(5)
if operation_result.status == "Succeeded":
return
raise WindowsAzureError('Timed out waiting for async operation ' + msg + ' "' + str(promise.request_id) + '" to complete.')
def get_ssh_certificate_tokens(module, ssh_cert_path):
"""
Returns the sha1 fingerprint and a base64-encoded PKCS12 version of the certificate.
"""
# This returns a string such as SHA1 Fingerprint=88:60:0B:13:A9:14:47:DA:4E:19:10:7D:34:92:2B:DF:A1:7D:CA:FF
rc, stdout, stderr = module.run_command(['openssl', 'x509', '-in', ssh_cert_path, '-fingerprint', '-noout'])
if rc != 0:
module.fail_json(msg="failed to generate the key fingerprint, error was: %s" % stderr)
fingerprint = stdout.strip()[17:].replace(':', '')
rc, stdout, stderr = module.run_command(['openssl', 'pkcs12', '-export', '-in', ssh_cert_path, '-nokeys', '-password', 'pass:'])
if rc != 0:
module.fail_json(msg="failed to generate the pkcs12 signature from the certificate, error was: %s" % stderr)
pkcs12_base64 = base64.b64encode(stdout.strip())
return (fingerprint, pkcs12_base64)
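# Note on the slicing above (editor's annotation): openssl prints a line of the form
#   SHA1 Fingerprint=88:60:0B:...:FF
# "SHA1 Fingerprint=" is 17 characters, so stdout.strip()[17:] keeps the hex
# pairs and .replace(':', '') yields the bare fingerprint Azure expects.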
def create_virtual_machine(module, azure):
"""
Create new virtual machine
module : AnsibleModule object
azure: authenticated azure ServiceManagementService object
Returns:
True if a new virtual machine was created, false otherwise
"""
name = module.params.get('name')
hostname = module.params.get('hostname') or name + ".cloudapp.net"
endpoints = module.params.get('endpoints').split(',')
ssh_cert_path = module.params.get('ssh_cert_path')
user = module.params.get('user')
password = module.params.get('password')
location = module.params.get('location')
role_size = module.params.get('role_size')
storage_account = module.params.get('storage_account')
image = module.params.get('image')
virtual_network_name = module.params.get('virtual_network_name')
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
# Check if a deployment with the same name already exists
cloud_service_name_available = azure.check_hosted_service_name_availability(name)
changed = cloud_service_name_available.result
# Create cloud service if necessary
try:
result = azure.create_hosted_service(service_name=name, label=name, location=location)
_wait_for_completion(azure, result, wait_timeout, "create_hosted_service")
except WindowsAzureError as e:
module.fail_json(msg="failed to create the new service name, it already exists: %s" % str(e))
# Create linux configuration
disable_ssh_password_authentication = not password
linux_config = LinuxConfigurationSet(hostname, user, password, disable_ssh_password_authentication)
# Add ssh certificates if specified
if ssh_cert_path:
fingerprint, pkcs12_base64 = get_ssh_certificate_tokens(module, ssh_cert_path)
# Add certificate to cloud service
result = azure.add_service_certificate(name, pkcs12_base64, 'pfx', '')
_wait_for_completion(azure, result, wait_timeout, "add_service_certificate")
# Create ssh config
ssh_config = SSH()
ssh_config.public_keys = PublicKeys()
authorized_keys_path = u'/home/%s/.ssh/authorized_keys' % user
ssh_config.public_keys.public_keys.append(PublicKey(path=authorized_keys_path, fingerprint=fingerprint))
# Append ssh config to linux machine config
linux_config.ssh = ssh_config
# Create network configuration
network_config = ConfigurationSetInputEndpoints()
network_config.configuration_set_type = 'NetworkConfiguration'
network_config.subnet_names = []
for port in endpoints:
network_config.input_endpoints.append(ConfigurationSetInputEndpoint(name='TCP-%s' % port,
protocol='TCP',
port=port,
local_port=port))
# First determine where to store disk
today = datetime.date.today().strftime('%Y-%m-%d')
disk_prefix = u'%s-%s' % (name, name)
media_link = u'http://%s.blob.core.windows.net/vhds/%s-%s.vhd' % (storage_account, disk_prefix, today)
# Create system hard disk
os_hd = OSVirtualHardDisk(image, media_link)
# Spin up virtual machine
try:
result = azure.create_virtual_machine_deployment(service_name=name,
deployment_name=name,
deployment_slot='production',
label=name,
role_name=name,
system_config=linux_config,
network_config=network_config,
os_virtual_hard_disk=os_hd,
role_size=role_size,
role_type='PersistentVMRole',
virtual_network_name=virtual_network_name)
_wait_for_completion(azure, result, wait_timeout, "create_virtual_machine_deployment")
except WindowsAzureError as e:
module.fail_json(msg="failed to create the new virtual machine, error was: %s" % str(e))
try:
deployment = azure.get_deployment_by_name(service_name=name, deployment_name=name)
return (changed, urlparse(deployment.url).hostname, deployment)
except WindowsAzureError as e:
module.fail_json(msg="failed to lookup the deployment information for %s, error was: %s" % (name, str(e)))
def terminate_virtual_machine(module, azure):
"""
Terminates a virtual machine
module : AnsibleModule object
azure: authenticated azure ServiceManagementService object
Not yet supported: handle deletion of attached data disks.
Returns:
True if the virtual machine was deleted, false otherwise
"""
# Whether to wait for termination to complete before returning
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
name = module.params.get('name')
delete_empty_services = module.params.get('delete_empty_services')
changed = False
deployment = None
public_dns_name = None
disk_names = []
try:
deployment = azure.get_deployment_by_name(service_name=name, deployment_name=name)
except WindowsAzureMissingResourceError as e:
pass # no such deployment or service
except WindowsAzureError as e:
module.fail_json(msg="failed to find the deployment, error was: %s" % str(e))
# Delete deployment
if deployment:
changed = True
try:
# gather disk info
results = []
for role in deployment.role_list:
role_props = azure.get_role(name, deployment.name, role.role_name)
if role_props.os_virtual_hard_disk.disk_name not in disk_names:
disk_names.append(role_props.os_virtual_hard_disk.disk_name)
result = azure.delete_deployment(name, deployment.name)
_wait_for_completion(azure, result, wait_timeout, "delete_deployment")
for disk_name in disk_names:
azure.delete_disk(disk_name, True)
# Now that the vm is deleted, remove the cloud service
result = azure.delete_hosted_service(service_name=name)
_wait_for_completion(azure, result, wait_timeout, "delete_hosted_service")
except WindowsAzureError as e:
module.fail_json(msg="failed to delete the service %s, error was: %s" % (name, str(e)))
public_dns_name = urlparse(deployment.url).hostname
return changed, public_dns_name, deployment
def get_azure_creds(module):
# Check module args for credentials, then check environment vars
subscription_id = module.params.get('subscription_id')
if not subscription_id:
subscription_id = os.environ.get('AZURE_SUBSCRIPTION_ID', None)
if not subscription_id:
module.fail_json(msg="No subscription_id provided. Please set 'AZURE_SUBSCRIPTION_ID' or use the 'subscription_id' parameter")
management_cert_path = module.params.get('management_cert_path')
if not management_cert_path:
management_cert_path = os.environ.get('AZURE_CERT_PATH', None)
if not management_cert_path:
module.fail_json(msg="No management_cert_path provided. Please set 'AZURE_CERT_PATH' or use the 'management_cert_path' parameter")
return subscription_id, management_cert_path
def main():
module = AnsibleModule(
argument_spec=dict(
ssh_cert_path=dict(),
name=dict(),
hostname=dict(),
location=dict(choices=AZURE_LOCATIONS),
role_size=dict(choices=AZURE_ROLE_SIZES),
subscription_id=dict(no_log=True),
storage_account=dict(),
management_cert_path=dict(),
endpoints=dict(default='22'),
user=dict(),
password=dict(),
image=dict(),
virtual_network_name=dict(default=None),
state=dict(default='present'),
wait=dict(type='bool', default=False),
wait_timeout=dict(default=600),
wait_timeout_redirects=dict(default=300)
)
)
# create azure ServiceManagementService object
subscription_id, management_cert_path = get_azure_creds(module)
wait_timeout_redirects = int(module.params.get('wait_timeout_redirects'))
if LooseVersion(windows_azure.__version__) <= "0.8.0":
# wrapper for handling redirects which the sdk <= 0.8.0 is not following
azure = Wrapper(ServiceManagementService(subscription_id, management_cert_path), wait_timeout_redirects)
else:
azure = ServiceManagementService(subscription_id, management_cert_path)
cloud_service_raw = None
if module.params.get('state') == 'absent':
(changed, public_dns_name, deployment) = terminate_virtual_machine(module, azure)
elif module.params.get('state') == 'present':
# Changed is always set to true when provisioning new instances
if not module.params.get('name'):
module.fail_json(msg='name parameter is required for new instance')
if not module.params.get('image'):
module.fail_json(msg='image parameter is required for new instance')
if not module.params.get('user'):
module.fail_json(msg='user parameter is required for new instance')
if not module.params.get('location'):
module.fail_json(msg='location parameter is required for new instance')
if not module.params.get('storage_account'):
module.fail_json(msg='storage_account parameter is required for new instance')
(changed, public_dns_name, deployment) = create_virtual_machine(module, azure)
module.exit_json(changed=changed, public_dns_name=public_dns_name, deployment=json.loads(json.dumps(deployment, default=lambda o: o.__dict__)))
class Wrapper(object):
def __init__(self, obj, wait_timeout):
self.other = obj
self.wait_timeout = wait_timeout
def __getattr__(self, name):
if hasattr(self.other, name):
func = getattr(self.other, name)
return lambda *args, **kwargs: self._wrap(func, args, kwargs)
raise AttributeError(name)
def _wrap(self, func, args, kwargs):
if type(func) == MethodType:
result = self._handle_temporary_redirects(lambda: func(*args, **kwargs))
else:
result = self._handle_temporary_redirects(lambda: func(self.other, *args, **kwargs))
return result
def _handle_temporary_redirects(self, f):
wait_timeout = time.time() + self.wait_timeout
while wait_timeout > time.time():
try:
return f()
except WindowsAzureError as e:
if "temporary redirect" in str(e).lower():
time.sleep(5)
else:
raise e
# import module snippets
from ansible.module_utils.basic import *
main()
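The Wrapper class above exists only to retry calls that older SDKs abort on temporary redirects. A minimal standalone sketch of the same retry-until-deadline pattern (function name hypothetical, not part of the module):

import time

def retry_on_temporary_redirect(call, timeout=300, delay=5):
    # Re-invoke `call` while it raises redirect errors, up to `timeout` seconds.
    deadline = time.time() + timeout
    while True:
        try:
            return call()
        except Exception as e:  # the module narrows this to WindowsAzureError
            if 'temporary redirect' in str(e).lower() and time.time() < deadline:
                time.sleep(delay)
            else:
                raise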

@@ -1,313 +0,0 @@
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: cloudformation
short_description: create an AWS CloudFormation stack
description:
- Launches an AWS CloudFormation stack and waits for it to complete.
version_added: "1.1"
options:
stack_name:
description:
- name of the cloudformation stack
required: true
default: null
aliases: []
disable_rollback:
description:
- If a stack fails to form, rollback will remove the stack
required: false
default: "false"
choices: [ "true", "false" ]
aliases: []
template_parameters:
description:
- a dictionary of all the template variables for the stack
required: false
default: {}
aliases: []
state:
description:
- If state is "present", stack will be created. If state is "present" and if stack exists and template has changed, it will be updated.
If state is absent, stack will be removed.
required: true
default: null
aliases: []
template:
description:
- the path of the cloudformation template
required: true
default: null
aliases: []
tags:
description:
- Dictionary of tags to associate with the stack and its resources during stack creation. Cannot be updated later.
Requires at least Boto version 2.6.0.
required: false
default: null
aliases: []
version_added: "1.4"
aws_secret_key:
description:
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_secret_key', 'secret_key' ]
version_added: "1.5"
aws_access_key:
description:
- AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_access_key', 'access_key' ]
version_added: "1.5"
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: false
aliases: ['aws_region', 'ec2_region']
version_added: "1.5"
requirements: [ "boto" ]
author: James S. Martin
'''
EXAMPLES = '''
# Basic task example
tasks:
- name: launch ansible cloudformation example
action: cloudformation >
stack_name="ansible-cloudformation" state=present
region=us-east-1 disable_rollback=true
template=files/cloudformation-example.json
args:
template_parameters:
KeyName: jmartin
DiskType: ephemeral
InstanceType: m1.small
ClusterSize: 3
tags:
Stack: ansible-cloudformation
'''
import json
import sys
import time
try:
import boto
import boto.cloudformation.connection
except ImportError:
print "failed=True msg='boto required for this module'"
sys.exit(1)
class Region:
def __init__(self, region):
'''connects boto to the region specified in the cloudformation template'''
self.name = region
self.endpoint = 'cloudformation.%s.amazonaws.com' % region
def boto_exception(err):
'''generic error message handler'''
if hasattr(err, 'error_message'):
error = err.error_message
elif hasattr(err, 'message'):
error = err.message
else:
error = '%s: %s' % (Exception, err)
return error
def boto_version_required(version_tuple):
parts = boto.Version.split('.')
boto_version = []
try:
for part in parts:
boto_version.append(int(part))
except:
boto_version.append(-1)
return tuple(boto_version) >= tuple(version_tuple)
def stack_operation(cfn, stack_name, operation):
'''gets the status of a stack while it is created/updated/deleted'''
existed = []
result = {}
operation_complete = False
while not operation_complete:
try:
stack = cfn.describe_stacks(stack_name)[0]
existed.append('yes')
except:
if 'yes' in existed:
result = dict(changed=True,
output='Stack Deleted',
events=map(str, list(stack.describe_events())))
else:
result = dict(changed=True, output='Stack Not Found')
break
if '%s_COMPLETE' % operation == stack.stack_status:
result = dict(changed=True,
events = map(str, list(stack.describe_events())),
output = 'Stack %s complete' % operation)
break
if 'ROLLBACK_COMPLETE' == stack.stack_status or '%s_ROLLBACK_COMPLETE' % operation == stack.stack_status:
result = dict(changed=True, failed=True,
events = map(str, list(stack.describe_events())),
output = 'Problem with %s. Rollback complete' % operation)
break
elif '%s_FAILED' % operation == stack.stack_status:
result = dict(changed=True, failed=True,
events = map(str, list(stack.describe_events())),
output = 'Stack %s failed' % operation)
break
else:
time.sleep(5)
return result
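# For a CREATE operation, the polling loop above exits on one of (editor's
# summary, derived from the conditions in stack_operation):
#   CREATE_COMPLETE                              -> changed=True
#   ROLLBACK_COMPLETE / CREATE_ROLLBACK_COMPLETE -> changed=True, failed=True
#   CREATE_FAILED                                -> changed=True, failed=True
#   describe_stacks raising                      -> 'Stack Deleted' or 'Stack Not Found'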
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
stack_name=dict(required=True),
template_parameters=dict(required=False, type='dict', default={}),
state=dict(default='present', choices=['present', 'absent']),
template=dict(default=None, required=True),
disable_rollback=dict(default=False, type='bool'),
tags=dict(default=None)
)
)
module = AnsibleModule(
argument_spec=argument_spec,
)
state = module.params['state']
stack_name = module.params['stack_name']
template_body = open(module.params['template'], 'r').read()
disable_rollback = module.params['disable_rollback']
template_parameters = module.params['template_parameters']
tags = module.params['tags']
ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module)
kwargs = dict()
if tags is not None:
if not boto_version_required((2,6,0)):
module.fail_json(msg='Module parameter "tags" requires at least Boto version 2.6.0')
kwargs['tags'] = tags
# convert the template parameters ansible passes into a tuple for boto
template_parameters_tup = [(k, v) for k, v in template_parameters.items()]
stack_outputs = {}
try:
cf_region = Region(region)
cfn = boto.cloudformation.connection.CloudFormationConnection(
aws_access_key_id=aws_access_key,
aws_secret_access_key=aws_secret_key,
region=cf_region,
)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg=str(e))
update = False
result = {}
operation = None
# if state is present we are going to ensure that the stack is either
# created or updated
if state == 'present':
try:
cfn.create_stack(stack_name, parameters=template_parameters_tup,
template_body=template_body,
disable_rollback=disable_rollback,
capabilities=['CAPABILITY_IAM'],
**kwargs)
operation = 'CREATE'
except Exception, err:
error_msg = boto_exception(err)
if 'AlreadyExistsException' in error_msg or 'already exists' in error_msg:
update = True
else:
module.fail_json(msg=error_msg)
if not update:
result = stack_operation(cfn, stack_name, operation)
# if the state is present and the stack already exists, we try to update it
# AWS will tell us if the stack template and parameters are the same and
# don't need to be updated.
if update:
try:
cfn.update_stack(stack_name, parameters=template_parameters_tup,
template_body=template_body,
disable_rollback=disable_rollback,
capabilities=['CAPABILITY_IAM'])
operation = 'UPDATE'
except Exception, err:
error_msg = boto_exception(err)
if 'No updates are to be performed.' in error_msg:
result = dict(changed=False, output='Stack is already up-to-date.')
else:
module.fail_json(msg=error_msg)
if operation == 'UPDATE':
result = stack_operation(cfn, stack_name, operation)
# check the status of the stack while we are creating/updating it.
# and get the outputs of the stack
if state == 'present' or update:
stack = cfn.describe_stacks(stack_name)[0]
for output in stack.outputs:
stack_outputs[output.key] = output.value
result['stack_outputs'] = stack_outputs
# absent state is different because of the way delete_stack works.
# the problem is it doesn't give an error if the stack isn't found,
# so we must describe the stack first
if state == 'absent':
try:
cfn.describe_stacks(stack_name)
operation = 'DELETE'
except Exception, err:
error_msg = boto_exception(err)
if 'Stack:%s does not exist' % stack_name in error_msg:
result = dict(changed=False, output='Stack not found.')
else:
module.fail_json(msg=error_msg)
if operation == 'DELETE':
cfn.delete_stack(stack_name)
result = stack_operation(cfn, stack_name, operation)
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
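The tags gate in main() relies on boto_version_required(). A simplified standalone variant of the same tuple comparison (illustrative only; it maps each bad part to -1 rather than bailing on the first, as the original does):

def version_tuple(version_string):
    # "2.6.0" -> (2, 6, 0)
    parts = []
    for part in version_string.split('.'):
        try:
            parts.append(int(part))
        except ValueError:
            parts.append(-1)
    return tuple(parts)

assert version_tuple('2.6.0') >= (2, 6, 0)
assert not version_tuple('2.5.2') >= (2, 6, 0)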

@@ -1,434 +0,0 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: digital_ocean
short_description: Create/delete a droplet/SSH_key in DigitalOcean
description:
- Create/delete a droplet in DigitalOcean and optionally wait for it to be 'running', or deploy an SSH key.
version_added: "1.3"
options:
command:
description:
- Which target you want to operate on.
default: droplet
choices: ['droplet', 'ssh']
state:
description:
- Indicate desired state of the target.
default: present
choices: ['present', 'active', 'absent', 'deleted']
client_id:
description:
- DigitalOcean manager id.
api_key:
description:
- DigitalOcean api key.
id:
description:
- Numeric, the droplet id you want to operate on.
name:
description:
- String, this is the name of the droplet - must be formatted by hostname rules, or the name of a SSH key.
unique_name:
description:
- Bool, require unique hostnames. By default, DigitalOcean allows multiple hosts with the same name. Setting this to "yes" allows only one host per name. Useful for idempotence.
version_added: "1.4"
default: "no"
choices: [ "yes", "no" ]
size_id:
description:
- Numeric, this is the id of the size you would like the droplet created with.
image_id:
description:
- Numeric, this is the id of the image you would like the droplet created with.
region_id:
description:
- "Numeric, this is the id of the region you would like your server to be created in."
ssh_key_ids:
description:
- Optional, comma separated list of ssh_key_ids that you would like to be added to the server.
virtio:
description:
- "Bool, turn on virtio driver in droplet for improved network and storage I/O."
version_added: "1.4"
default: "yes"
choices: [ "yes", "no" ]
private_networking:
description:
- "Bool, add an additional, private network interface to droplet for inter-droplet communication."
version_added: "1.4"
default: "no"
choices: [ "yes", "no" ]
backups_enabled:
description:
- Optional, Boolean, enables backups for your droplet.
version_added: "1.6"
default: "no"
choices: [ "yes", "no" ]
wait:
description:
- Wait for the droplet to be in state 'running' before returning. If wait is "no" an ip_address may not be returned.
default: "yes"
choices: [ "yes", "no" ]
wait_timeout:
description:
- How long before wait gives up, in seconds.
default: 300
ssh_pub_key:
description:
- The public SSH key you want to add to your account.
notes:
- Two environment variables can be used, DO_CLIENT_ID and DO_API_KEY.
requirements: [ dopy ]
'''
EXAMPLES = '''
# Ensure a SSH key is present
# If a key matches this name, will return the ssh key id and changed = False
# If no existing key matches this name, a new key is created, the ssh key id is returned and changed = True
- digital_ocean: >
state=present
command=ssh
name=my_ssh_key
ssh_pub_key='ssh-rsa AAAA...'
client_id=XXX
api_key=XXX
# Create a new Droplet
# Will return the droplet details including the droplet id (used for idempotence)
- digital_ocean: >
state=present
command=droplet
name=mydroplet
client_id=XXX
api_key=XXX
size_id=1
region_id=2
image_id=3
wait_timeout=500
register: my_droplet
- debug: msg="ID is {{ my_droplet.droplet.id }}"
- debug: msg="IP is {{ my_droplet.droplet.ip_address }}"
# Ensure a droplet is present
# If a droplet with this id already exists, will return the droplet details and changed = False
# If no droplet matches the id, a new droplet will be created and the droplet details (including the new id) are returned, changed = True.
- digital_ocean: >
state=present
command=droplet
id=123
name=mydroplet
client_id=XXX
api_key=XXX
size_id=1
region_id=2
image_id=3
wait_timeout=500
# Create a droplet with ssh key
# The ssh key id can be passed as argument at the creation of a droplet (see ssh_key_ids).
# Several keys can be added to ssh_key_ids as id1,id2,id3
# The keys are used to connect as root to the droplet.
- digital_ocean: >
state=present
ssh_key_ids=id1,id2
name=mydroplet
client_id=XXX
api_key=XXX
size_id=1
region_id=2
image_id=3
'''
import sys
import os
import time
try:
import dopy
from dopy.manager import DoError, DoManager
except ImportError, e:
print "failed=True msg='dopy >= 0.2.3 required for this module'"
sys.exit(1)
from distutils.version import LooseVersion
if LooseVersion(dopy.__version__) < LooseVersion('0.2.3'):
print "failed=True msg='dopy >= 0.2.3 required for this module'"
sys.exit(1)
class TimeoutError(DoError):
def __init__(self, msg, id):
super(TimeoutError, self).__init__(msg)
self.id = id
class JsonfyMixIn(object):
def to_json(self):
return self.__dict__
class Droplet(JsonfyMixIn):
manager = None
def __init__(self, droplet_json):
self.status = 'new'
self.__dict__.update(droplet_json)
def is_powered_on(self):
return self.status == 'active'
def update_attr(self, attrs=None):
if attrs:
for k, v in attrs.iteritems():
setattr(self, k, v)
else:
json = self.manager.show_droplet(self.id)
if json['ip_address']:
self.update_attr(json)
def power_on(self):
assert self.status == 'off', 'Can only power on a stopped droplet.'
json = self.manager.power_on_droplet(self.id)
self.update_attr(json)
def ensure_powered_on(self, wait=True, wait_timeout=300):
if self.is_powered_on():
return
if self.status == 'off': # powered off
self.power_on()
if wait:
end_time = time.time() + wait_timeout
while time.time() < end_time:
time.sleep(min(20, end_time - time.time()))
self.update_attr()
if self.is_powered_on():
if not self.ip_address:
raise TimeoutError('No IP address found.', self.id)
return
raise TimeoutError('Timed out waiting for the droplet to be running', self.id)
def destroy(self):
return self.manager.destroy_droplet(self.id, scrub_data=True)
@classmethod
def setup(cls, client_id, api_key):
cls.manager = DoManager(client_id, api_key)
@classmethod
def add(cls, name, size_id, image_id, region_id, ssh_key_ids=None, virtio=True, private_networking=False, backups_enabled=False):
json = cls.manager.new_droplet(name, size_id, image_id, region_id, ssh_key_ids, virtio, private_networking, backups_enabled)
droplet = cls(json)
return droplet
@classmethod
def find(cls, id=None, name=None):
if not id and not name:
return False
droplets = cls.list_all()
# Check first by id. digital ocean requires that it be unique
for droplet in droplets:
if droplet.id == id:
return droplet
# Failing that, check by hostname.
for droplet in droplets:
if droplet.name == name:
return droplet
return False
@classmethod
def list_all(cls):
json = cls.manager.all_active_droplets()
return map(cls, json)
class SSH(JsonfyMixIn):
manager = None
def __init__(self, ssh_key_json):
self.__dict__.update(ssh_key_json)
update_attr = __init__
def destroy(self):
self.manager.destroy_ssh_key(self.id)
return True
@classmethod
def setup(cls, client_id, api_key):
cls.manager = DoManager(client_id, api_key)
@classmethod
def find(cls, name):
if not name:
return False
keys = cls.list_all()
for key in keys:
if key.name == name:
return key
return False
@classmethod
def list_all(cls):
json = cls.manager.all_ssh_keys()
return map(cls, json)
@classmethod
def add(cls, name, key_pub):
json = cls.manager.new_ssh_key(name, key_pub)
return cls(json)
def core(module):
def getkeyordie(k):
v = module.params[k]
if v is None:
module.fail_json(msg='Unable to load %s' % k)
return v
try:
# params['client_id'] will be None even if client_id is not passed in
client_id = module.params['client_id'] or os.environ['DO_CLIENT_ID']
api_key = module.params['api_key'] or os.environ['DO_API_KEY']
except KeyError, e:
module.fail_json(msg='Unable to load %s' % e.message)
changed = True
command = module.params['command']
state = module.params['state']
if command == 'droplet':
Droplet.setup(client_id, api_key)
if state in ('active', 'present'):
# First, try to find a droplet by id.
droplet = Droplet.find(id=module.params['id'])
# If we couldn't find the droplet and the user is allowing unique
# hostnames, then check to see if a droplet with the specified
# hostname already exists.
if not droplet and module.params['unique_name']:
droplet = Droplet.find(name=getkeyordie('name'))
# If both of those attempts failed, then create a new droplet.
if not droplet:
droplet = Droplet.add(
name=getkeyordie('name'),
size_id=getkeyordie('size_id'),
image_id=getkeyordie('image_id'),
region_id=getkeyordie('region_id'),
ssh_key_ids=module.params['ssh_key_ids'],
virtio=module.params['virtio'],
private_networking=module.params['private_networking'],
backups_enabled=module.params['backups_enabled'],
)
if droplet.is_powered_on():
changed = False
droplet.ensure_powered_on(
wait=getkeyordie('wait'),
wait_timeout=getkeyordie('wait_timeout')
)
module.exit_json(changed=changed, droplet=droplet.to_json())
elif state in ('absent', 'deleted'):
# First, try to find a droplet by id.
droplet = Droplet.find(module.params['id'])
# If we couldn't find the droplet and the user is allowing unique
# hostnames, then check to see if a droplet with the specified
# hostname already exists.
if not droplet and module.params['unique_name']:
droplet = Droplet.find(name=getkeyordie('name'))
if not droplet:
module.exit_json(changed=False, msg='The droplet is not found.')
event_json = droplet.destroy()
module.exit_json(changed=True, event_id=event_json['event_id'])
elif command == 'ssh':
SSH.setup(client_id, api_key)
name = getkeyordie('name')
if state in ('active', 'present'):
key = SSH.find(name)
if key:
module.exit_json(changed=False, ssh_key=key.to_json())
key = SSH.add(name, getkeyordie('ssh_pub_key'))
module.exit_json(changed=True, ssh_key=key.to_json())
elif state in ('absent', 'deleted'):
key = SSH.find(name)
if not key:
module.exit_json(changed=False, msg='SSH key with the name %s was not found.' % name)
key.destroy()
module.exit_json(changed=True)
def main():
module = AnsibleModule(
argument_spec = dict(
command = dict(choices=['droplet', 'ssh'], default='droplet'),
state = dict(choices=['active', 'present', 'absent', 'deleted'], default='present'),
client_id = dict(aliases=['CLIENT_ID'], no_log=True),
api_key = dict(aliases=['API_KEY'], no_log=True),
name = dict(type='str'),
size_id = dict(type='int'),
image_id = dict(type='int'),
region_id = dict(type='int'),
ssh_key_ids = dict(default=''),
virtio = dict(type='bool', default='yes'),
private_networking = dict(type='bool', default='no'),
backups_enabled = dict(type='bool', default='no'),
id = dict(aliases=['droplet_id'], type='int'),
unique_name = dict(type='bool', default='no'),
wait = dict(type='bool', default=True),
wait_timeout = dict(default=300, type='int'),
ssh_pub_key = dict(type='str'),
),
required_together = (
['size_id', 'image_id', 'region_id'],
),
mutually_exclusive = (
['size_id', 'ssh_pub_key'],
['image_id', 'ssh_pub_key'],
['region_id', 'ssh_pub_key'],
),
required_one_of = (
['id', 'name'],
),
)
try:
core(module)
except TimeoutError, e:
module.fail_json(msg=str(e), id=e.id)
except (DoError, Exception), e:
module.fail_json(msg=str(e))
# import module snippets
from ansible.module_utils.basic import *
main()
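The client_id/api_key resolution in core() is a param-then-environment fallback. A minimal standalone sketch of the same pattern (function name hypothetical):

import os

def resolve_credential(param_value, env_var):
    # Prefer the explicit module parameter; fall back to the environment.
    # Raises KeyError (surfaced via fail_json above) when neither is set.
    return param_value or os.environ[env_var]

# resolve_credential('abc123', 'DO_CLIENT_ID') -> 'abc123'
# resolve_credential(None, 'DO_CLIENT_ID')     -> $DO_CLIENT_ID, or KeyError if unset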

@@ -1,242 +0,0 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: digital_ocean_domain
short_description: Create/delete a DNS record in DigitalOcean
description:
- Create/delete a DNS record in DigitalOcean.
version_added: "1.6"
options:
state:
description:
- Indicate desired state of the target.
default: present
choices: ['present', 'active', 'absent', 'deleted']
client_id:
description:
- DigitalOcean manager id.
api_key:
description:
- DigitalOcean api key.
id:
description:
- Numeric, the droplet id you want to operate on.
name:
description:
- String, this is the name of the domain - must be formatted by hostname rules.
ip:
description:
- The IP address to point a domain at.
notes:
- Two environment variables can be used, DO_CLIENT_ID and DO_API_KEY.
'''
EXAMPLES = '''
# Create a domain record
- digital_ocean_domain: >
state=present
name=my.digitalocean.domain
ip=127.0.0.1
# Create a droplet and a corresponding domain record
- digital_ocean: >
state=present
name=test_droplet
size_id=1
region_id=2
image_id=3
register: test_droplet
- digital_ocean_domain: >
state=present
name={{ test_droplet.droplet.name }}.my.domain
ip={{ test_droplet.droplet.ip_address }}
'''
import sys
import os
import time
try:
from dopy.manager import DoError, DoManager
except ImportError as e:
print "failed=True msg='dopy required for this module'"
sys.exit(1)
class TimeoutError(DoError):
def __init__(self, msg, id):
super(TimeoutError, self).__init__(msg)
self.id = id
class JsonfyMixIn(object):
def to_json(self):
return self.__dict__
class DomainRecord(JsonfyMixIn):
manager = None
def __init__(self, json):
self.__dict__.update(json)
update_attr = __init__
def update(self, data = None, record_type = None):
json = self.manager.edit_domain_record(self.domain_id,
self.id,
record_type if record_type is not None else self.record_type,
data if data is not None else self.data)
self.__dict__.update(json)
return self
def destroy(self):
json = self.manager.destroy_domain_record(self.domain_id, self.id)
return json
class Domain(JsonfyMixIn):
manager = None
def __init__(self, domain_json):
self.__dict__.update(domain_json)
def destroy(self):
self.manager.destroy_domain(self.id)
def records(self):
json = self.manager.all_domain_records(self.id)
return map(DomainRecord, json)
@classmethod
def add(cls, name, ip):
json = cls.manager.new_domain(name, ip)
return cls(json)
@classmethod
def setup(cls, client_id, api_key):
cls.manager = DoManager(client_id, api_key)
DomainRecord.manager = cls.manager
@classmethod
def list_all(cls):
domains = cls.manager.all_domains()
return map(cls, domains)
@classmethod
def find(cls, name=None, id=None):
if name is None and id is None:
return False
domains = Domain.list_all()
if id is not None:
for domain in domains:
if domain.id == id:
return domain
if name is not None:
for domain in domains:
if domain.name == name:
return domain
return False
def core(module):
def getkeyordie(k):
v = module.params[k]
if v is None:
module.fail_json(msg='Unable to load %s' % k)
return v
try:
# params['client_id'] will be None even if client_id is not passed in
client_id = module.params['client_id'] or os.environ['DO_CLIENT_ID']
api_key = module.params['api_key'] or os.environ['DO_API_KEY']
except KeyError, e:
module.fail_json(msg='Unable to load %s' % e.message)
changed = True
state = module.params['state']
Domain.setup(client_id, api_key)
if state == 'present':
domain = Domain.find(id=module.params["id"])
if not domain:
domain = Domain.find(name=getkeyordie("name"))
if not domain:
domain = Domain.add(getkeyordie("name"),
getkeyordie("ip"))
module.exit_json(changed=True, domain=domain.to_json())
else:
records = domain.records()
at_record = None
for record in records:
if record.name == "@":
at_record = record
if at_record and at_record.data != getkeyordie("ip"):
at_record.update(data=getkeyordie("ip"), record_type='A')
module.exit_json(changed=True, domain=Domain.find(id=at_record.domain_id).to_json())
module.exit_json(changed=False, domain=domain.to_json())
elif state == 'absent':
domain = None
if "id" in module.params:
domain = Domain.find(id=module.params["id"])
if not domain and "name" in module.params:
domain = Domain.find(name=module.params["name"])
if not domain:
module.exit_json(changed=False, msg="Domain not found.")
event_json = domain.destroy()
module.exit_json(changed=True, event=event_json)
def main():
module = AnsibleModule(
argument_spec = dict(
state = dict(choices=['active', 'present', 'absent', 'deleted'], default='present'),
client_id = dict(aliases=['CLIENT_ID'], no_log=True),
api_key = dict(aliases=['API_KEY'], no_log=True),
name = dict(type='str'),
id = dict(aliases=['droplet_id'], type='int'),
ip = dict(type='str'),
),
required_one_of = (
['id', 'name'],
),
)
try:
core(module)
except TimeoutError as e:
module.fail_json(msg=str(e), id=e.id)
except (DoError, Exception) as e:
module.fail_json(msg=str(e))
# import module snippets
from ansible.module_utils.basic import *
main()
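Droplet.find and Domain.find share a lookup order: the unique id wins, the name is only a fallback. The pattern in isolation (illustrative sketch, not part of the module):

def find_by_id_or_name(items, id=None, name=None):
    # id first, because DigitalOcean guarantees ids are unique; names may not be
    if id is None and name is None:
        return None
    for item in items:
        if id is not None and item.id == id:
            return item
    for item in items:
        if name is not None and item.name == name:
            return item
    return None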

@@ -1,178 +0,0 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: digital_ocean_sshkey
short_description: Create/delete an SSH key in DigitalOcean
description:
- Create/delete an SSH key.
version_added: "1.6"
options:
state:
description:
- Indicate desired state of the target.
default: present
choices: ['present', 'absent']
client_id:
description:
- DigitalOcean manager id.
api_key:
description:
- DigitalOcean api key.
id:
description:
- Numeric, the SSH key id you want to operate on.
name:
description:
- String, this is the name of an SSH key to create or destroy.
ssh_pub_key:
description:
- The public SSH key you want to add to your account.
notes:
- Two environment variables can be used, DO_CLIENT_ID and DO_API_KEY.
'''
EXAMPLES = '''
# Ensure a SSH key is present
# If a key matches this name, will return the ssh key id and changed = False
# If no existing key matches this name, a new key is created, the ssh key id is returned and changed = True
- digital_ocean_sshkey: >
state=present
name=my_ssh_key
ssh_pub_key='ssh-rsa AAAA...'
client_id=XXX
api_key=XXX
'''
import sys
import os
import time
try:
from dopy.manager import DoError, DoManager
except ImportError as e:
print "failed=True msg='dopy required for this module'"
sys.exit(1)
class TimeoutError(DoError):
def __init__(self, msg, id):
super(TimeoutError, self).__init__(msg)
self.id = id
class JsonfyMixIn(object):
def to_json(self):
return self.__dict__
class SSH(JsonfyMixIn):
manager = None
def __init__(self, ssh_key_json):
self.__dict__.update(ssh_key_json)
update_attr = __init__
def destroy(self):
self.manager.destroy_ssh_key(self.id)
return True
@classmethod
def setup(cls, client_id, api_key):
cls.manager = DoManager(client_id, api_key)
@classmethod
def find(cls, name):
if not name:
return False
keys = cls.list_all()
for key in keys:
if key.name == name:
return key
return False
@classmethod
def list_all(cls):
json = cls.manager.all_ssh_keys()
return map(cls, json)
@classmethod
def add(cls, name, key_pub):
json = cls.manager.new_ssh_key(name, key_pub)
return cls(json)
def core(module):
def getkeyordie(k):
v = module.params[k]
if v is None:
module.fail_json(msg='Unable to load %s' % k)
return v
try:
# params['client_id'] will be None even if client_id is not passed in
client_id = module.params['client_id'] or os.environ['DO_CLIENT_ID']
api_key = module.params['api_key'] or os.environ['DO_API_KEY']
except KeyError, e:
module.fail_json(msg='Unable to load %s' % e.message)
changed = True
state = module.params['state']
SSH.setup(client_id, api_key)
name = getkeyordie('name')
if state == 'present':
key = SSH.find(name)
if key:
module.exit_json(changed=False, ssh_key=key.to_json())
key = SSH.add(name, getkeyordie('ssh_pub_key'))
module.exit_json(changed=True, ssh_key=key.to_json())
elif state == 'absent':
key = SSH.find(name)
if not key:
module.exit_json(changed=False, msg='SSH key with the name %s was not found.' % name)
key.destroy()
module.exit_json(changed=True)
def main():
module = AnsibleModule(
argument_spec = dict(
state = dict(choices=['present', 'absent'], default='present'),
client_id = dict(aliases=['CLIENT_ID'], no_log=True),
api_key = dict(aliases=['API_KEY'], no_log=True),
name = dict(type='str'),
id = dict(aliases=['droplet_id'], type='int'),
ssh_pub_key = dict(type='str'),
),
required_one_of = (
['id', 'name'],
),
)
try:
core(module)
except TimeoutError as e:
module.fail_json(msg=str(e), id=e.id)
except (DoError, Exception) as e:
module.fail_json(msg=str(e))
# import module snippets
from ansible.module_utils.basic import *
main()
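For state=present the module's idempotence contract is find-or-create. A minimal sketch of the same pattern (function name hypothetical):

def ensure_ssh_key(name, pub_key, find, add):
    # Mirrors core() above: an existing key means changed=False,
    # creating one means changed=True.
    existing = find(name)
    if existing:
        return False, existing
    return True, add(name, pub_key)

# e.g. changed, key = ensure_ssh_key('my_ssh_key', 'ssh-rsa AAAA...', SSH.find, SSH.add)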

@@ -1,854 +0,0 @@
#!/usr/bin/python
# (c) 2013, Cove Schneider
# (c) 2014, Joshua Conner <joshua.conner@gmail.com>
# (c) 2014, Pavel Antonov <antonov@adwz.ru>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################################
DOCUMENTATION = '''
---
module: docker
version_added: "1.4"
short_description: manage docker containers
description:
- Manage the life cycle of docker containers.
options:
count:
description:
- Set number of containers to run
required: False
default: 1
aliases: []
image:
description:
- Set container image to use
required: true
default: null
aliases: []
command:
description:
- Set command to run in a container on startup
required: false
default: null
aliases: []
name:
description:
- Set name for container (used to find single container or to provide links); cannot be used with count
required: false
default: null
aliases: []
version_added: "1.5"
ports:
description:
- Set private to public port mapping specification using docker CLI-style syntax [([<host_interface>:[host_port]])|(<host_port>):]<container_port>[/udp]
required: false
default: null
aliases: []
version_added: "1.5"
expose:
description:
- Set container ports to expose for port mappings or links. (If the port is already exposed using EXPOSE in a Dockerfile, you don't need to expose it again.)
required: false
default: null
aliases: []
version_added: "1.5"
publish_all_ports:
description:
- Publish all exposed ports to the host interfaces
required: false
default: false
aliases: []
version_added: "1.5"
volumes:
description:
- Set volume(s) to mount on the container
required: false
default: null
aliases: []
volumes_from:
description:
- Set shared volume(s) from another container
required: false
default: null
aliases: []
links:
description:
- Link container(s) to other container(s) (e.g. links=redis,postgresql:db)
required: false
default: null
aliases: []
version_added: "1.5"
memory_limit:
description:
- Set RAM allocated to container
required: false
default: 256MB
aliases: []
docker_url:
description:
- URL of docker host to issue commands to
required: false
default: unix://var/run/docker.sock
aliases: []
docker_api_version:
description:
- Remote API version to use. This defaults to the current default as specified by docker-py.
required: false
default: docker-py default remote API version
aliases: []
version_added: "1.8"
username:
description:
- Set remote API username
required: false
default: null
aliases: []
password:
description:
- Set remote API password
required: false
default: null
aliases: []
hostname:
description:
- Set container hostname
required: false
default: null
aliases: []
env:
description:
- Set environment variables (e.g. env="PASSWORD=sEcRe7,WORKERS=4")
required: false
default: null
aliases: []
dns:
description:
- Set custom DNS servers for the container
required: false
default: null
aliases: []
detach:
description:
- Enable detached mode on start up, leaves container running in background
required: false
default: true
aliases: []
state:
description:
- Set the state of the container
required: false
default: present
choices: [ "present", "running", "stopped", "absent", "killed", "restarted" ]
aliases: []
privileged:
description:
- Set whether the container should run in privileged mode
required: false
default: false
aliases: []
lxc_conf:
description:
- LXC config parameters, e.g. lxc.aa_profile:unconfined
required: false
default:
aliases: []
stdin_open:
description:
- Keep stdin open
required: false
default: false
aliases: []
version_added: "1.6"
tty:
description:
- Allocate a pseudo-tty
required: false
default: false
aliases: []
version_added: "1.6"
net:
description:
- Set Network mode for the container (bridge, none, container:<name|id>, host). Requires docker >= 0.11.
required: false
default: false
aliases: []
version_added: "1.8"
registry:
description:
- The remote registry URL to use for pulling images.
required: false
default: ''
aliases: []
version_added: "1.8"
author: Cove Schneider, Joshua Conner, Pavel Antonov
requirements: [ "docker-py >= 0.3.0", "docker >= 0.10.0" ]
'''
EXAMPLES = '''
Start one docker container running tomcat in each host of the web group and bind tomcat's listening port to 8080
on the host:
- hosts: web
sudo: yes
tasks:
- name: run tomcat servers
docker: image=centos command="service tomcat6 start" ports=8080
The tomcat server's port is NAT'ed to a dynamic port on the host, but you can determine which port the server was
mapped to using docker_containers:
- hosts: web
sudo: yes
tasks:
- name: run tomcat servers
docker: image=centos command="service tomcat6 start" ports=8080 count=5
- name: Display IP address and port mappings for containers
debug: msg={{inventory_hostname}}:{{item['HostConfig']['PortBindings']['8080/tcp'][0]['HostPort']}}
with_items: docker_containers
Just as in the previous example, but iterates over the list of docker containers with a sequence:
- hosts: web
sudo: yes
vars:
start_containers_count: 5
tasks:
- name: run tomcat servers
docker: image=centos command="service tomcat6 start" ports=8080 count={{start_containers_count}}
- name: Display IP address and port mappings for containers
debug: msg="{{inventory_hostname}}:{{docker_containers[{{item}}]['HostConfig']['PortBindings']['8080/tcp'][0]['HostPort']}}"
with_sequence: start=0 end={{start_containers_count - 1}}
Stop, remove all of the running tomcat containers and list the exit code from the stopped containers:
- hosts: web
sudo: yes
tasks:
- name: stop tomcat servers
docker: image=centos command="service tomcat6 start" state=absent
- name: Display return codes from stopped containers
debug: msg="Returned {{inventory_hostname}}:{{item}}"
with_items: docker_containers
Create a named container:
- hosts: web
sudo: yes
tasks:
- name: run tomcat server
docker: image=centos name=tomcat command="service tomcat6 start" ports=8080
Create multiple named containers:
- hosts: web
sudo: yes
tasks:
- name: run tomcat servers
docker: image=centos name={{item}} command="service tomcat6 start" ports=8080
with_items:
- crookshank
- snowbell
- heathcliff
- felix
- sylvester
Create containers named in a sequence:
- hosts: web
sudo: yes
tasks:
- name: run tomcat servers
docker: image=centos name={{item}} command="service tomcat6 start" ports=8080
with_sequence: start=1 end=5 format=tomcat_%d.example.com
Create two linked containers:
- hosts: web
sudo: yes
tasks:
- name: ensure redis container is running
docker: image=crosbymichael/redis name=redis
- name: ensure redis_ambassador container is running
docker: image=svendowideit/ambassador ports=6379:6379 links=redis:redis name=redis_ambassador_ansible
Create containers with options specified as key-value pairs and lists:
- hosts: web
sudo: yes
tasks:
- docker:
image: namespace/image_name
links:
- postgresql:db
- redis:redis
Create containers with options specified as strings and lists as comma-separated strings:
- hosts: web
sudo: yes
tasks:
- docker: image=namespace/image_name links=postgresql:db,redis:redis
Create a container with no networking:
- hosts: web
sudo: yes
tasks:
- docker: image=namespace/image_name net=none
'''
HAS_DOCKER_PY = True
import sys
from urlparse import urlparse
try:
import docker.client
import docker.utils
from requests.exceptions import *
except ImportError, e:
HAS_DOCKER_PY = False
try:
from docker.errors import APIError as DockerAPIError
except ImportError:
from docker.client import APIError as DockerAPIError
def _human_to_bytes(number):
suffixes = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
if isinstance(number, int):
return number
if number[-1] == suffixes[0] and number[-2].isdigit():
return int(number[:-1])
i = 1
for each in suffixes[1:]:
if number[-len(each):] == suffixes[i]:
return int(number[:-len(each)]) * (1024 ** i)
i = i + 1
print "failed=True msg='Could not convert %s to integer'" % (number)
sys.exit(1)
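# Illustrative conversions performed by _human_to_bytes() (editor's examples,
# not part of the original module):
#   _human_to_bytes(1024)    -> 1024 (ints pass through)
#   _human_to_bytes('512B')  -> 512
#   _human_to_bytes('256MB') -> 256 * 1024 ** 2
#   _human_to_bytes('1GB')   -> 1024 ** 3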
def _ansible_facts(container_list):
return {"docker_containers": container_list}
def _docker_id_quirk(inspect):
# XXX: some quirk in docker
if 'ID' in inspect:
inspect['Id'] = inspect['ID']
del inspect['ID']
return inspect
class DockerManager:
counters = {'created':0, 'started':0, 'stopped':0, 'killed':0, 'removed':0, 'restarted':0, 'pull':0}
def __init__(self, module):
self.module = module
self.binds = None
self.volumes = None
if self.module.params.get('volumes'):
self.binds = {}
self.volumes = {}
vols = self.module.params.get('volumes')
for vol in vols:
parts = vol.split(":")
# host mount (e.g. /mnt:/tmp, bind mounts host's /tmp to /mnt in the container)
if len(parts) == 2:
self.volumes[parts[1]] = {}
self.binds[parts[0]] = parts[1]
# with bind mode
elif len(parts) == 3:
if parts[2] not in ['ro', 'rw']:
self.module.fail_json(msg='bind mode needs to either be "ro" or "rw"')
ro = parts[2] == 'ro'
self.volumes[parts[1]] = {}
self.binds[parts[0]] = {'bind': parts[1], 'ro': ro}
# docker mount (e.g. /www, mounts a docker volume /www on the container at the same location)
else:
self.volumes[parts[0]] = {}
self.lxc_conf = None
if self.module.params.get('lxc_conf'):
self.lxc_conf = []
options = self.module.params.get('lxc_conf')
for option in options:
parts = option.split(':')
self.lxc_conf.append({"Key": parts[0], "Value": parts[1]})
self.exposed_ports = None
if self.module.params.get('expose'):
self.exposed_ports = self.get_exposed_ports(self.module.params.get('expose'))
self.port_bindings = None
if self.module.params.get('ports'):
self.port_bindings = self.get_port_bindings(self.module.params.get('ports'))
self.links = None
if self.module.params.get('links'):
self.links = self.get_links(self.module.params.get('links'))
self.env = self.module.params.get('env', None)
# connect to docker server
docker_url = urlparse(module.params.get('docker_url'))
docker_api_version = module.params.get('docker_api_version')
self.client = docker.Client(base_url=docker_url.geturl(), version=docker_api_version)
def get_links(self, links):
"""
Parse the links passed, if a link is specified without an alias then just create the alias of the same name as the link
"""
processed_links = {}
for link in links:
parsed_link = link.split(':', 1)
if len(parsed_link) == 2:
processed_links[parsed_link[0]] = parsed_link[1]
else:
processed_links[parsed_link[0]] = parsed_link[0]
return processed_links
def get_exposed_ports(self, expose_list):
"""
Parse the ports and protocols (TCP/UDP) to expose in the docker-py `create_container` call from the docker CLI-style syntax.
"""
if expose_list:
exposed = []
for port in expose_list:
port = str(port).strip()
if port.endswith('/tcp') or port.endswith('/udp'):
port_with_proto = tuple(port.split('/'))
else:
# assume tcp protocol if not specified
port_with_proto = (port, 'tcp')
exposed.append(port_with_proto)
return exposed
else:
return None
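# Example (editor's illustration): tcp is assumed when no protocol is given.
#   get_exposed_ports([8080, '53/udp']) -> [('8080', 'tcp'), ('53', 'udp')]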
def get_port_bindings(self, ports):
"""
Parse the `ports` string into a port bindings dict for the `start_container` call.
"""
binds = {}
for port in ports:
# ports could potentially be an array like [80, 443], so we make sure they're strings
# before splitting
parts = str(port).split(':')
container_port = parts[-1]
if '/' not in container_port:
container_port = int(parts[-1])
p_len = len(parts)
if p_len == 1:
# Bind `container_port` of the container to a dynamically
# allocated TCP port on all available interfaces of the host
# machine.
bind = ('0.0.0.0',)
elif p_len == 2:
# Bind `container_port` of the container to port `parts[0]` on
# all available interfaces of the host machine.
bind = ('0.0.0.0', int(parts[0]))
elif p_len == 3:
# Bind `container_port` of the container to port `parts[1]` on
# IP `parts[0]` of the host machine. If `parts[1]` is empty, bind
# to a dynamically allocated port of IP `parts[0]`.
bind = (parts[0], int(parts[1])) if parts[1] else (parts[0],)
if container_port in binds:
old_bind = binds[container_port]
if isinstance(old_bind, list):
# append to list if it already exists
old_bind.append(bind)
else:
# otherwise create list that contains the old and new binds
binds[container_port] = [binds[container_port], bind]
else:
binds[container_port] = bind
return binds
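# Examples (editor's illustration) of the three supported forms:
#   get_port_bindings([9000])                -> {9000: ('0.0.0.0',)}
#   get_port_bindings(['8080:80'])           -> {80: ('0.0.0.0', 8080)}
#   get_port_bindings(['127.0.0.1:2222:22']) -> {22: ('127.0.0.1', 2222)}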
def get_split_image_tag(self, image):
# If image contains a host or org name, omit that from our check
if '/' in image:
registry, resource = image.rsplit('/', 1)
else:
registry, resource = None, image
# now we can determine if image has a tag
if ':' in resource:
resource, tag = resource.split(':', 1)
if registry:
resource = '/'.join((registry, resource))
else:
tag = "latest"
resource = image
return resource, tag
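# Examples (editor's illustration): a registry/org prefix is preserved and
# the tag defaults to 'latest' when the image has none.
#   get_split_image_tag('ubuntu')                            -> ('ubuntu', 'latest')
#   get_split_image_tag('registry.example.com/org/app:1.0')  -> ('registry.example.com/org/app', '1.0')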
def get_summary_counters_msg(self):
msg = ""
for k, v in self.counters.iteritems():
msg = msg + "%s %d " % (k, v)
return msg
def increment_counter(self, name):
self.counters[name] = self.counters[name] + 1
def has_changed(self):
for k, v in self.counters.iteritems():
if v > 0:
return True
return False
def get_inspect_containers(self, containers):
inspect = []
for i in containers:
details = self.client.inspect_container(i['Id'])
details = _docker_id_quirk(details)
inspect.append(details)
return inspect
def get_deployed_containers(self):
"""determine which images/commands are running already"""
image = self.module.params.get('image')
command = self.module.params.get('command')
if command:
command = command.strip()
name = self.module.params.get('name')
if name and not name.startswith('/'):
name = '/' + name
deployed = []
# if we weren't given a tag with the image, we need to compare on the image name only,
# since docker will give us back the full image name including a tag in the container list if one exists.
image, tag = self.get_split_image_tag(image)
for i in self.client.containers(all=True):
running_image, running_tag = self.get_split_image_tag(i['Image'])
running_command = i['Command'].strip()
name_matches = False
if i["Names"]:
name_matches = (name and name in i['Names'])
image_matches = (running_image == image)
tag_matches = (not tag or running_tag == tag)
# if a container has an entrypoint, `command` will actually equal
# '{} {}'.format(entrypoint, command)
command_matches = (not command or running_command.endswith(command))
if name_matches or (name is None and image_matches and tag_matches and command_matches):
details = self.client.inspect_container(i['Id'])
details = _docker_id_quirk(details)
deployed.append(details)
return deployed
def get_running_containers(self):
running = []
for i in self.get_deployed_containers():
if i['State']['Running'] and not i['State'].get('Ghost', False):
running.append(i)
return running
def create_containers(self, count=1):
params = {'image': self.module.params.get('image'),
'command': self.module.params.get('command'),
'ports': self.exposed_ports,
'volumes': self.volumes,
'mem_limit': _human_to_bytes(self.module.params.get('memory_limit')),
'environment': self.env,
'hostname': self.module.params.get('hostname'),
'detach': self.module.params.get('detach'),
'name': self.module.params.get('name'),
'stdin_open': self.module.params.get('stdin_open'),
'tty': self.module.params.get('tty'),
}
if docker.utils.compare_version('1.10', self.client.version()['ApiVersion']) < 0:
params['dns'] = self.module.params.get('dns')
params['volumes_from'] = self.module.params.get('volumes_from')
def do_create(count, params):
results = []
for _ in range(count):
result = self.client.create_container(**params)
self.increment_counter('created')
results.append(result)
return results
try:
containers = do_create(count, params)
except:
resource = self.module.params.get('image')
image, tag = self.get_split_image_tag(resource)
if self.module.params.get('username'):
try:
self.client.login(
self.module.params.get('username'),
password=self.module.params.get('password'),
email=self.module.params.get('email'),
registry=self.module.params.get('registry')
)
except:
self.module.fail_json(msg="failed to login to the remote registry, check your username/password.")
try:
self.client.pull(image, tag=tag)
except:
self.module.fail_json(msg="failed to pull the specified image: %s" % resource)
self.increment_counter('pull')
containers = do_create(count, params)
return containers
def start_containers(self, containers):
params = {
'lxc_conf': self.lxc_conf,
'binds': self.binds,
'port_bindings': self.port_bindings,
'publish_all_ports': self.module.params.get('publish_all_ports'),
'privileged': self.module.params.get('privileged'),
'links': self.links,
'network_mode': self.module.params.get('net'),
}
if docker.utils.compare_version('1.10', self.client.version()['ApiVersion']) >= 0 and hasattr(docker, '__version__') and docker.__version__ > '0.3.0':
params['dns'] = self.module.params.get('dns')
params['volumes_from'] = self.module.params.get('volumes_from')
for i in containers:
self.client.start(i['Id'], **params)
self.increment_counter('started')
def stop_containers(self, containers):
for i in containers:
self.client.stop(i['Id'])
self.increment_counter('stopped')
return [self.client.wait(i['Id']) for i in containers]
def remove_containers(self, containers):
for i in containers:
self.client.remove_container(i['Id'])
self.increment_counter('removed')
def kill_containers(self, containers):
for i in containers:
self.client.kill(i['Id'])
self.increment_counter('killed')
def restart_containers(self, containers):
for i in containers:
self.client.restart(i['Id'])
self.increment_counter('restarted')
def check_dependencies(module):
"""
Ensure `docker-py` >= 0.3.0 is installed, and call module.fail_json with a
helpful error message if it isn't.
"""
if not HAS_DOCKER_PY:
module.fail_json(msg="`docker-py` doesn't seem to be installed, but is required for the Ansible Docker module.")
else:
HAS_NEW_ENOUGH_DOCKER_PY = False
if hasattr(docker, '__version__'):
# a '__version__' attribute was added to the module, but not until
# after 0.3.0 was pushed to pip. If it's there, use it.
if docker.__version__ >= '0.3.0':
HAS_NEW_ENOUGH_DOCKER_PY = True
else:
# HACK: if '__version__' isn't there, we check for the existence of
# `_get_raw_response_socket` in the docker.Client class, which was
# added in 0.3.0
if hasattr(docker.Client, '_get_raw_response_socket'):
HAS_NEW_ENOUGH_DOCKER_PY = True
if not HAS_NEW_ENOUGH_DOCKER_PY:
module.fail_json(msg="The Ansible Docker module requires `docker-py` >= 0.3.0.")
def main():
module = AnsibleModule(
argument_spec = dict(
count = dict(default=1),
image = dict(required=True),
command = dict(required=False, default=None),
expose = dict(required=False, default=None, type='list'),
ports = dict(required=False, default=None, type='list'),
publish_all_ports = dict(default=False, type='bool'),
volumes = dict(default=None, type='list'),
volumes_from = dict(default=None),
links = dict(default=None, type='list'),
memory_limit = dict(default=0),
memory_swap = dict(default=0),
docker_url = dict(default='unix://var/run/docker.sock'),
docker_api_version = dict(default=docker.client.DEFAULT_DOCKER_API_VERSION),
username = dict(default=None),
password = dict(),
email = dict(),
registry = dict(),
hostname = dict(default=None),
env = dict(type='dict'),
dns = dict(),
detach = dict(default=True, type='bool'),
state = dict(default='running', choices=['absent', 'present', 'running', 'stopped', 'killed', 'restarted']),
debug = dict(default=False, type='bool'),
privileged = dict(default=False, type='bool'),
stdin_open = dict(default=False, type='bool'),
tty = dict(default=False, type='bool'),
lxc_conf = dict(default=None, type='list'),
name = dict(default=None),
net = dict(default=None)
)
)
check_dependencies(module)
try:
manager = DockerManager(module)
state = module.params.get('state')
count = int(module.params.get('count'))
name = module.params.get('name')
image = module.params.get('image')
if count < 0:
module.fail_json(msg="Count must be greater than zero")
if count > 1 and name:
module.fail_json(msg="Count and name must not be used together")
running_containers = manager.get_running_containers()
running_count = len(running_containers)
delta = count - running_count
deployed_containers = manager.get_deployed_containers()
facts = None
failed = False
changed = False
# start/stop containers
if state in [ "running", "present" ]:
# make sure a container with `name` exists, if not create and start it
if name:
# first determine if a container with this name exists
existing_container = None
for deployed_container in deployed_containers:
if deployed_container.get('Name') == '/%s' % name:
existing_container = deployed_container
break
# the named container is running, but with a
# different image or tag, so we stop it first
if existing_container and existing_container.get('Config', dict()).get('Image') != image:
manager.stop_containers([existing_container])
manager.remove_containers([existing_container])
running_containers = manager.get_running_containers()
deployed_containers = manager.get_deployed_containers()
existing_container = None
# if the container isn't running (or if we stopped the
# old version above), create and (maybe) start it up now
if not existing_container:
containers = manager.create_containers(1)
if state == "present": # otherwise it get (re)started later anyways..
manager.start_containers(containers)
running_containers = manager.get_running_containers()
deployed_containers = manager.get_deployed_containers()
if state == "running":
# make sure a container with `name` is running
if name and "/" + name not in map(lambda x: x.get('Name'), running_containers):
manager.start_containers(deployed_containers)
# start more containers if we don't have enough
elif delta > 0:
containers = manager.create_containers(delta)
manager.start_containers(containers)
# stop containers if we have too many
elif delta < 0:
containers_to_stop = running_containers[0:abs(delta)]
containers = manager.stop_containers(containers_to_stop)
manager.remove_containers(containers_to_stop)
facts = manager.get_running_containers()
else:
facts = manager.get_deployed_containers()
# stop and remove containers
elif state == "absent":
facts = manager.stop_containers(deployed_containers)
manager.remove_containers(deployed_containers)
# stop containers
elif state == "stopped":
facts = manager.stop_containers(running_containers)
# kill containers
elif state == "killed":
manager.kill_containers(running_containers)
# restart containers
elif state == "restarted":
manager.restart_containers(running_containers)
facts = manager.get_inspect_containers(running_containers)
msg = "%s container(s) running image %s with command %s" % \
(manager.get_summary_counters_msg(), module.params.get('image'), module.params.get('command'))
changed = manager.has_changed()
module.exit_json(failed=failed, changed=changed, msg=msg, ansible_facts=_ansible_facts(facts))
except DockerAPIError, e:
changed = manager.has_changed()
module.exit_json(failed=True, changed=changed, msg="Docker API error: " + e.explanation)
except RequestException, e:
changed = manager.has_changed()
module.exit_json(failed=True, changed=changed, msg=repr(e))
# import module snippets
from ansible.module_utils.basic import *
main()

@ -1,252 +0,0 @@
#!/usr/bin/python
#
# (c) 2014, Pavel Antonov <antonov@adwz.ru>
#
# This file is part of Ansible
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
######################################################################
DOCUMENTATION = '''
---
module: docker_image
author: Pavel Antonov
version_added: "1.5"
short_description: manage docker images
description:
- Create, check and remove docker images
options:
path:
description:
- Path to directory with Dockerfile
required: false
default: null
aliases: []
name:
description:
- Image name to work with
required: true
default: null
aliases: []
tag:
description:
- Image tag to work with
required: false
default: "latest"
aliases: []
nocache:
description:
- Do not use cache with building
required: false
default: false
aliases: []
docker_url:
description:
- URL of docker host to issue commands to
required: false
default: unix://var/run/docker.sock
aliases: []
state:
description:
- Set the state of the image
required: false
default: present
choices: [ "present", "absent", "build" ]
aliases: []
timeout:
description:
- Set image operation timeout
required: false
default: 600
aliases: []
requirements: [ "docker-py" ]
'''
EXAMPLES = '''
Build docker image if required. Path should contain a Dockerfile to build the image:
- hosts: web
sudo: yes
tasks:
- name: check or build image
docker_image: path="/path/to/build/dir" name="my/app" state=present
Build new version of image:
- hosts: web
sudo: yes
tasks:
- name: check or build image
docker_image: path="/path/to/build/dir" name="my/app" state=build
Remove image from local docker storage:
- hosts: web
sudo: yes
tasks:
- name: remove image
docker_image: name="my/app" state=absent
'''
try:
import sys
import re
import json
import docker.client
from requests.exceptions import *
from urlparse import urlparse
except ImportError, e:
print "failed=True msg='failed to import python module: %s'" % e
sys.exit(1)
try:
from docker.errors import APIError as DockerAPIError
except ImportError:
from docker.client import APIError as DockerAPIError
class DockerImageManager:
def __init__(self, module):
self.module = module
self.path = self.module.params.get('path')
self.name = self.module.params.get('name')
self.tag = self.module.params.get('tag')
self.nocache = self.module.params.get('nocache')
docker_url = urlparse(module.params.get('docker_url'))
self.client = docker.Client(base_url=docker_url.geturl(), timeout=module.params.get('timeout'))
self.changed = False
self.log = []
self.error_msg = None
def get_log(self, as_string=True):
return "".join(self.log) if as_string else self.log
def build(self):
stream = self.client.build(self.path, tag=':'.join([self.name, self.tag]), nocache=self.nocache, rm=True, stream=True)
success_search = r'Successfully built ([0-9a-f]+)'
image_id = None
self.changed = True
for chunk in stream:
if not chunk:
continue
try:
chunk_json = json.loads(chunk)
except ValueError:
continue
if 'error' in chunk_json:
self.error_msg = chunk_json['error']
return None
if 'stream' in chunk_json:
output = chunk_json['stream']
self.log.append(output)
match = re.search(success_search, output)
if match:
image_id = match.group(1)
# Just in case we skipped evaluating the JSON returned from build
# during every iteration, add an error if the image_id was never
# populated
if not image_id:
self.error_msg = 'Unknown error encountered'
return image_id
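# Editor's note: each chunk in the build stream is a JSON object; a successful
# build ends with a chunk shaped like (illustrative)
#   {"stream": "Successfully built 7d9495d03763\n"}
# from which the regex above extracts '7d9495d03763' as the image id.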
def has_changed(self):
return self.changed
def get_images(self):
filtered_images = []
images = self.client.images()
for i in images:
# Docker-py version >= 0.3 (Docker API >= 1.8)
if 'RepoTags' in i:
repotag = ':'.join([self.name, self.tag])
if not self.name or repotag in i['RepoTags']:
filtered_images.append(i)
# Docker-py version < 0.3 (Docker API < 1.8)
elif (not self.name or self.name == i['Repository']) and (not self.tag or self.tag == i['Tag']):
filtered_images.append(i)
return filtered_images
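# Example (editor's illustration): with name='my/app' and tag='latest', an
# image whose RepoTags list contains 'my/app:latest' is kept; older docker-py
# versions are matched on the separate 'Repository' and 'Tag' fields instead.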
def remove_images(self):
images = self.get_images()
for i in images:
try:
self.client.remove_image(i['Id'])
self.changed = True
except DockerAPIError as e:
# removal can fail, e.g. if the image is still in use by a container; ignore and continue
pass
def main():
module = AnsibleModule(
argument_spec = dict(
path = dict(required=False, default=None),
name = dict(required=True),
tag = dict(required=False, default="latest"),
nocache = dict(default=False, type='bool'),
state = dict(default='present', choices=['absent', 'present', 'build']),
docker_url = dict(default='unix://var/run/docker.sock'),
timeout = dict(default=600, type='int'),
)
)
try:
manager = DockerImageManager(module)
state = module.params.get('state')
failed = False
image_id = None
msg = ''
do_build = False
# build image if not exists
if state == "present":
images = manager.get_images()
if len(images) == 0:
do_build = True
# build image
elif state == "build":
do_build = True
# remove image or images
elif state == "absent":
manager.remove_images()
if do_build:
image_id = manager.build()
if image_id:
msg = "Image built: %s" % image_id
else:
failed = True
msg = "Error: %s\nLog:%s" % (manager.error_msg, manager.get_log())
module.exit_json(failed=failed, changed=manager.has_changed(), msg=msg, image_id=image_id)
except DockerAPIError as e:
module.exit_json(failed=True, changed=manager.has_changed(), msg="Docker API error: " + e.explanation)
except RequestException as e:
module.exit_json(failed=True, changed=manager.has_changed(), msg=repr(e))
# import module snippets
from ansible.module_utils.basic import *
main()

File diff suppressed because it is too large

@ -1,273 +0,0 @@
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_ami
version_added: "1.3"
short_description: create or destroy an image in ec2, return imageid
description:
- Creates or deletes ec2 images. This module has a dependency on python-boto >= 2.5
options:
instance_id:
description:
- instance id of the image to create
required: false
default: null
aliases: []
name:
description:
- The name of the new image to create
required: false
default: null
aliases: []
wait:
description:
- wait for the AMI to be in state 'available' before returning.
required: false
default: "no"
choices: [ "yes", "no" ]
aliases: []
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 300
aliases: []
state:
description:
- create or deregister/delete image
required: false
default: 'present'
aliases: []
region:
description:
- The AWS region to use. Must be specified if ec2_url is not used. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: false
default: null
aliases: [ 'aws_region', 'ec2_region' ]
description:
description:
- An optional human-readable string describing the contents and purpose of the AMI.
required: false
default: null
aliases: []
no_reboot:
description:
- An optional flag indicating that the bundling process should not attempt to shut down the instance before bundling. If this flag is True, the responsibility of maintaining file system integrity is left to the owner of the instance. The default choice is "no".
required: false
default: no
choices: [ "yes", "no" ]
aliases: []
image_id:
description:
- Image ID to be deregistered.
required: false
default: null
aliases: []
delete_snapshot:
description:
- Whether or not to delete an AMI's snapshot while deregistering it.
required: false
default: null
aliases: []
author: Evan Duffield <eduffield@iacquire.com>
extends_documentation_fragment: aws
'''
# Thank you to iAcquire for sponsoring development of this module.
#
# See http://alestic.com/2011/06/ec2-ami-security for more information about ensuring the security of your AMI.
EXAMPLES = '''
# Basic AMI Creation
- local_action:
module: ec2_ami
aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx
aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
instance_id: i-xxxxxx
wait: yes
name: newtest
register: instance
# Basic AMI Creation, without waiting
- local_action:
module: ec2_ami
aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx
aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
region: xxxxxx
instance_id: i-xxxxxx
wait: no
name: newtest
register: instance
# Deregister/Delete AMI
- local_action:
module: ec2_ami
aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx
aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
region: xxxxxx
image_id: ${instance.image_id}
delete_snapshot: True
state: absent
# Deregister AMI
- local_action:
module: ec2_ami
aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx
aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
region: xxxxxx
image_id: ${instance.image_id}
delete_snapshot: False
state: absent
'''
import sys
import time
try:
import boto
import boto.ec2
except ImportError:
print "failed=True msg='boto required for this module'"
sys.exit(1)
def create_image(module, ec2):
"""
Creates new AMI
module : AnsibleModule object
ec2: authenticated ec2 connection object
"""
instance_id = module.params.get('instance_id')
name = module.params.get('name')
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
description = module.params.get('description')
no_reboot = module.params.get('no_reboot')
try:
params = {'instance_id': instance_id,
'name': name,
'description': description,
'no_reboot': no_reboot}
image_id = ec2.create_image(**params)
except boto.exception.BotoServerError, e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
# Wait until the image is recognized. The EC2 API has eventual consistency,
# such that a successful CreateImage API call doesn't guarantee the success
# of a subsequent DescribeImages API call using the new image id returned.
for i in range(wait_timeout):
try:
img = ec2.get_image(image_id)
break
except boto.exception.EC2ResponseError, e:
if 'InvalidAMIID.NotFound' in e.error_code and wait:
time.sleep(1)
else:
module.fail_json(msg="Error while trying to find the new image. Using wait=yes and/or a longer wait_timeout may help.")
else:
module.fail_json(msg="timed out waiting for image to be recognized")
# wait here until the image is created
wait_timeout = time.time() + wait_timeout
while wait and wait_timeout > time.time() and (img is None or img.state != 'available'):
img = ec2.get_image(image_id)
time.sleep(3)
if wait and wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg = "timed out waiting for image to be created")
module.exit_json(msg="AMI creation operation complete", image_id=image_id, state=img.state, changed=True)
def deregister_image(module, ec2):
"""
Deregisters AMI
"""
image_id = module.params.get('image_id')
delete_snapshot = module.params.get('delete_snapshot')
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
img = ec2.get_image(image_id)
if img is None:
module.fail_json(msg = "Image %s does not exist" % image_id, changed=False)
try:
params = {'image_id': image_id,
'delete_snapshot': delete_snapshot}
res = ec2.deregister_image(**params)
except boto.exception.BotoServerError, e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
# wait here until the image is gone
img = ec2.get_image(image_id)
wait_timeout = time.time() + wait_timeout
while wait and wait_timeout > time.time() and img is not None:
img = ec2.get_image(image_id)
time.sleep(3)
if wait and wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg = "timed out waiting for image to be reregistered/deleted")
module.exit_json(msg="AMI deregister/delete operation complete", changed=True)
sys.exit(0)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
instance_id = dict(),
image_id = dict(),
delete_snapshot = dict(),
name = dict(),
wait = dict(type="bool", default=False),
wait_timeout = dict(default=900),
description = dict(default=""),
no_reboot = dict(default=False, type="bool"),
state = dict(default='present'),
)
)
module = AnsibleModule(argument_spec=argument_spec)
ec2 = ec2_connect(module)
if module.params.get('state') == 'absent':
if not module.params.get('image_id'):
module.fail_json(msg='image_id is required to deregister/delete an image')
deregister_image(module, ec2)
elif module.params.get('state') == 'present':
# Changed is always set to true when provisioning new AMI
if not module.params.get('instance_id'):
module.fail_json(msg='instance_id parameter is required for new image')
if not module.params.get('name'):
module.fail_json(msg='name parameter is required for new image')
create_image(module, ec2)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()

@ -1,196 +0,0 @@
#!/usr/bin/python
#
# (c) 2013, Nimbis Services
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_ami_search
short_description: Retrieve AWS AMI for a given operating system.
version_added: "1.6"
description:
- Look up the most recent AMI on AWS for a given operating system.
- Returns C(ami), C(aki), C(ari), C(serial), C(tag)
- If there is no AKI or ARI associated with an image, these will be C(null).
- Only supports images from cloud-images.ubuntu.com
- 'Example output: C({"ami": "ami-69f5a900", "changed": false, "aki": "aki-88aa75e1", "tag": "release", "ari": null, "serial": "20131024"})'
version_added: "1.6"
options:
distro:
description: Linux distribution (e.g., C(ubuntu))
required: true
choices: ["ubuntu"]
release:
description: short name of the release (e.g., C(precise))
required: true
stream:
description: Type of release.
required: false
default: "server"
choices: ["server", "desktop"]
store:
description: Back-end store for instance
required: false
default: "ebs"
choices: ["ebs", "ebs-io1", "ebs-ssd", "instance-store"]
arch:
description: CPU architecture
required: false
default: "amd64"
choices: ["i386", "amd64"]
region:
description: EC2 region
required: false
default: us-east-1
choices: ["ap-northeast-1", "ap-southeast-1", "ap-southeast-2",
"eu-west-1", "sa-east-1", "us-east-1", "us-west-1", "us-west-2", "us-gov-west-1"]
virt:
description: virtualization type
required: false
default: paravirtual
choices: ["paravirtual", "hvm"]
author: Lorin Hochstein
'''
EXAMPLES = '''
- name: Launch an Ubuntu 12.04 (Precise Pangolin) EC2 instance
hosts: 127.0.0.1
connection: local
tasks:
- name: Get the Ubuntu precise AMI
ec2_ami_search: distro=ubuntu release=precise region=us-west-1 store=instance-store
register: ubuntu_image
- name: Start the EC2 instance
ec2: image={{ ubuntu_image.ami }} instance_type=m1.small key_name=mykey
'''
import csv
import json
import urllib2
import urlparse
SUPPORTED_DISTROS = ['ubuntu']
AWS_REGIONS = ['ap-northeast-1',
'ap-southeast-1',
'ap-southeast-2',
'eu-west-1',
'sa-east-1',
'us-east-1',
'us-west-1',
'us-west-2']
def get_url(module, url):
""" Get url and return response """
try:
r = urllib2.urlopen(url)
except (urllib2.HTTPError, urllib2.URLError), e:
code = getattr(e, 'code', -1)
module.fail_json(msg="Request failed: %s" % str(e), status_code=code)
return r
def ubuntu(module):
""" Get the ami for ubuntu """
release = module.params['release']
stream = module.params['stream']
store = module.params['store']
arch = module.params['arch']
region = module.params['region']
virt = module.params['virt']
url = get_ubuntu_url(release, stream)
req = get_url(module, url)
reader = csv.reader(req, delimiter='\t')
try:
ami, aki, ari, tag, serial = lookup_ubuntu_ami(reader, release, stream,
store, arch, region, virt)
module.exit_json(changed=False, ami=ami, aki=aki, ari=ari, tag=tag,
serial=serial)
except KeyError:
module.fail_json(msg="No matching AMI found")
def lookup_ubuntu_ami(table, release, stream, store, arch, region, virt):
""" Look up the Ubuntu AMI that matches query given a table of AMIs
table: an iterable that returns a row of
(release, stream, tag, serial, store, arch, region, ami, aki, ari, virt)
release: ubuntu release name
stream: 'server' or 'desktop'
store: 'ebs', 'ebs-io1', 'ebs-ssd' or 'instance-store'
arch: 'i386' or 'amd64'
region: EC2 region
virt: 'paravirtual' or 'hvm'
Returns (ami, aki, ari, tag, serial)"""
expected = (release, stream, store, arch, region, virt)
for row in table:
(actual_release, actual_stream, tag, serial,
actual_store, actual_arch, actual_region, ami, aki, ari,
actual_virt) = row
actual = (actual_release, actual_stream, actual_store, actual_arch,
actual_region, actual_virt)
if actual == expected:
# aki and ari are sometimes blank
if aki == '':
aki = None
if ari == '':
ari = None
return (ami, aki, ari, tag, serial)
raise KeyError()
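# Example row (editor's illustration, values combined from the sample output
# and example task above):
#   ('precise', 'server', 'release', '20131024', 'instance-store', 'amd64',
#    'us-west-1', 'ami-69f5a900', 'aki-88aa75e1', '', 'paravirtual')
# matches expected=('precise', 'server', 'instance-store', 'amd64',
# 'us-west-1', 'paravirtual') and returns
# ('ami-69f5a900', 'aki-88aa75e1', None, 'release', '20131024').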
def get_ubuntu_url(release, stream):
url = "https://cloud-images.ubuntu.com/query/%s/%s/released.current.txt"
return url % (release, stream)
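# Example (editor's illustration):
#   get_ubuntu_url('precise', 'server')
#   -> 'https://cloud-images.ubuntu.com/query/precise/server/released.current.txt'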
def main():
arg_spec = dict(
distro=dict(required=True, choices=SUPPORTED_DISTROS),
release=dict(required=True),
stream=dict(required=False, default='server',
choices=['desktop', 'server']),
store=dict(required=False, default='ebs',
choices=['ebs', 'ebs-io1', 'ebs-ssd', 'instance-store']),
arch=dict(required=False, default='amd64',
choices=['i386', 'amd64']),
region=dict(required=False, default='us-east-1', choices=AWS_REGIONS),
virt=dict(required=False, default='paravirtual',
choices=['paravirtual', 'hvm'])
)
module = AnsibleModule(argument_spec=arg_spec)
distro = module.params['distro']
if distro == 'ubuntu':
ubuntu(module)
else:
module.fail_json(msg="Unsupported distro: %s" % distro)
# this is magic, see lib/ansible/module_common.py
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
if __name__ == '__main__':
main()

@ -1,608 +0,0 @@
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
---
module: ec2_asg
short_description: Create or delete AWS Autoscaling Groups
description:
- Can create or delete AWS Autoscaling Groups
- Works with the ec2_lc module to manage Launch Configurations
version_added: "1.6"
author: Gareth Rushgrove
options:
state:
description:
- register or deregister the instance
required: true
choices: ['present', 'absent']
name:
description:
- Unique name for group to be created or deleted
required: true
load_balancers:
description:
- List of ELB names to use for the group
required: false
availability_zones:
description:
- List of availability zone names in which to create the group. Defaults to all the availability zones in the region if vpc_zone_identifier is not set.
required: false
launch_config_name:
description:
- Name of the Launch configuration to use for the group. See the ec2_lc module for managing these.
required: false
min_size:
description:
- Minimum number of instances in group
required: false
max_size:
description:
- Maximum number of instances in group
required: false
desired_capacity:
description:
- Desired number of instances in group
required: false
replace_all_instances:
description:
- In a rolling fashion, replace all instances that have an old launch configuration with instances from the current launch configuration.
required: false
version_added: "1.8"
default: False
replace_batch_size:
description:
- Number of instances you'd like to replace at a time. Used with replace_all_instances.
required: false
version_added: "1.8"
default: 1
replace_instances:
description:
- List of instance_ids belonging to the named ASG that you would like to terminate and have replaced with instances matching the current launch configuration.
required: false
version_added: "1.8"
default: None
lc_check:
description:
- Check to make sure instances that are being replaced with replace_instances do not already have the current launch_config.
required: false
version_added: "1.8"
default: True
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: false
aliases: ['aws_region', 'ec2_region']
vpc_zone_identifier:
description:
- List of VPC subnets to use
required: false
default: None
tags:
description:
- A list of tags to add to the Auto Scale Group. Optional key is 'propagate_at_launch', which defaults to true.
required: false
default: None
version_added: "1.7"
health_check_period:
description:
- Length of time in seconds after a new EC2 instance comes into service that Auto Scaling starts checking its health.
required: false
default: 300 seconds
version_added: "1.7"
health_check_type:
description:
- The service you want the health status from, Amazon EC2 or Elastic Load Balancer.
required: false
default: EC2
version_added: "1.7"
choices: ['EC2', 'ELB']
wait_timeout:
description:
- how long to wait for replaced instances to become viable. Used in conjunction with the replace_instances option.
default: 300
version_added: "1.8"
extends_documentation_fragment: aws
"""
EXAMPLES = '''
A basic example of configuration:
- ec2_asg:
name: special
load_balancers: 'lb1,lb2'
availability_zones: 'eu-west-1a,eu-west-1b'
launch_config_name: 'lc-1'
min_size: 1
max_size: 10
desired_capacity: 5
vpc_zone_identifier: 'subnet-abcd1234,subnet-1a2b3c4d'
tags:
- environment: production
propagate_at_launch: no
Below is an example of how to assign a new launch config to an ASG and terminate old instances.
All instances in "myasg" that do not have the launch configuration named "my_new_lc" will be terminated in
a rolling fashion with instances using the current launch configuration, "my_new_lc".
This could also be considered a rolling deploy of a pre-baked AMI.
If this is a newly created group, the instances will not be replaced since all instances
will have the current launch configuration.
- name: create launch config
ec2_lc:
name: my_new_lc
image_id: ami-lkajsf
key_name: mykey
region: us-east-1
security_groups: sg-23423
instance_type: m1.small
assign_public_ip: yes
- ec2_asg:
name: myasg
launch_config_name: my_new_lc
health_check_period: 60
health_check_type: ELB
replace_all_instances: yes
min_size: 5
max_size: 5
desired_capacity: 5
region: us-east-1
To only replace a couple of instances instead of all of them, supply a list
to "replace_instances":
- ec2_asg:
name: myasg
launch_config_name: my_new_lc
health_check_period: 60
health_check_type: ELB
replace_instances:
- i-b345231
- i-24c2931
min_size: 5
max_size: 5
desired_capacity: 5
region: us-east-1
'''
import sys
import time
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
try:
import boto.ec2.autoscale
from boto.ec2.autoscale import AutoScaleConnection, AutoScalingGroup, Tag
from boto.exception import BotoServerError
except ImportError:
print "failed=True msg='boto required for this module'"
sys.exit(1)
ASG_ATTRIBUTES = ('availability_zones', 'default_cooldown', 'desired_capacity',
'health_check_period', 'health_check_type', 'launch_config_name',
'load_balancers', 'max_size', 'min_size', 'name', 'placement_group',
'tags', 'termination_policies', 'vpc_zone_identifier')
INSTANCE_ATTRIBUTES = ('instance_id', 'health_status', 'lifecycle_state', 'launch_config_name')
def enforce_required_arguments(module):
''' Since many arguments are not required for autoscale group deletion,
they cannot be mandatory arguments for the module, so we enforce
them here '''
missing_args = []
for arg in ('min_size', 'max_size', 'launch_config_name'):
if module.params[arg] is None:
missing_args.append(arg)
if missing_args:
module.fail_json(msg="Missing required arguments for autoscaling group create/update: %s" % ",".join(missing_args))
def get_properties(autoscaling_group):
properties = dict((attr, getattr(autoscaling_group, attr)) for attr in ASG_ATTRIBUTES)
properties['healthy_instances'] = 0
properties['in_service_instances'] = 0
properties['unhealthy_instances'] = 0
properties['pending_instances'] = 0
properties['viable_instances'] = 0
properties['terminating_instances'] = 0
if autoscaling_group.instances:
properties['instances'] = [i.instance_id for i in autoscaling_group.instances]
instance_facts = {}
for i in autoscaling_group.instances:
instance_facts[i.instance_id] = {'health_status': i.health_status,
'lifecycle_state': i.lifecycle_state,
'launch_config_name': i.launch_config_name }
if i.health_status == 'Healthy' and i.lifecycle_state == 'InService':
properties['viable_instances'] += 1
if i.health_status == 'Healthy':
properties['healthy_instances'] += 1
else:
properties['unhealthy_instances'] += 1
if i.lifecycle_state == 'InService':
properties['in_service_instances'] += 1
if i.lifecycle_state == 'Terminating':
properties['terminating_instances'] += 1
if i.lifecycle_state == 'Pending':
properties['pending_instances'] += 1
properties['instance_facts'] = instance_facts
properties['load_balancers'] = autoscaling_group.load_balancers
return properties
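# Example (editor's illustration): an instance reported as
#   {'health_status': 'Healthy', 'lifecycle_state': 'InService'}
# increments healthy_instances, in_service_instances and viable_instances,
# so 'viable_instances' counts members that are both healthy and in service.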
def create_autoscaling_group(connection, module):
group_name = module.params.get('name')
load_balancers = module.params['load_balancers']
availability_zones = module.params['availability_zones']
launch_config_name = module.params.get('launch_config_name')
min_size = module.params['min_size']
max_size = module.params['max_size']
desired_capacity = module.params.get('desired_capacity')
vpc_zone_identifier = module.params.get('vpc_zone_identifier')
set_tags = module.params.get('tags')
health_check_period = module.params.get('health_check_period')
health_check_type = module.params.get('health_check_type')
as_groups = connection.get_all_groups(names=[group_name])
if not vpc_zone_identifier and not availability_zones:
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
try:
ec2_connection = connect_to_aws(boto.ec2, region, **aws_connect_params)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg=str(e))
asg_tags = []
for tag in set_tags:
if tag.has_key('key') and tag.has_key('value'): # this block is to support the deprecated form
asg_tags.append(Tag(key=tag.get('key'),
value=tag.get('value'),
propagate_at_launch=bool(tag.get('propagate_at_launch', True)),
resource_id=group_name))
else:
for k,v in tag.iteritems():
if k !='propagate_at_launch':
asg_tags.append(Tag(key=k,
value=v,
propagate_at_launch=bool(tag.get('propagate_at_launch', True)),
resource_id=group_name))
if not as_groups:
if not vpc_zone_identifier and not availability_zones:
availability_zones = module.params['availability_zones'] = [zone.name for zone in ec2_connection.get_all_zones()]
enforce_required_arguments(module)
launch_configs = connection.get_all_launch_configurations(names=[launch_config_name])
ag = AutoScalingGroup(
group_name=group_name,
load_balancers=load_balancers,
availability_zones=availability_zones,
launch_config=launch_configs[0],
min_size=min_size,
max_size=max_size,
desired_capacity=desired_capacity,
vpc_zone_identifier=vpc_zone_identifier,
connection=connection,
tags=asg_tags,
health_check_period=health_check_period,
health_check_type=health_check_type)
try:
connection.create_auto_scaling_group(ag)
asg_properties = get_properties(ag)
changed = True
return(changed, asg_properties)
except BotoServerError, e:
module.fail_json(msg=str(e))
else:
as_group = as_groups[0]
changed = False
for attr in ASG_ATTRIBUTES:
if module.params.get(attr):
module_attr = module.params.get(attr)
group_attr = getattr(as_group, attr)
# we do this because AWS and the module may return the same list
# sorted differently
try:
module_attr.sort()
except:
pass
try:
group_attr.sort()
except:
pass
if group_attr != module_attr:
changed = True
setattr(as_group, attr, module_attr)
if len(set_tags) > 0:
existing_tags = as_group.tags
existing_tag_map = dict((tag.key, tag) for tag in existing_tags)
for tag in set_tags:
if tag.has_key('key') and tag.has_key('value'): # this is to support deprecated method
if 'key' not in tag:
continue
if ( not tag['key'] in existing_tag_map or
existing_tag_map[tag['key']].value != tag['value'] or
('propagate_at_launch' in tag and
existing_tag_map[tag['key']].propagate_at_launch != tag['propagate_at_launch']) ):
changed = True
continue
else:
for k,v in tag.iteritems():
if k !='propagate_at_launch':
if ( not k in existing_tag_map or
existing_tag_map[k].value != v or
('propagate_at_launch' in tag and
existing_tag_map[k].propagate_at_launch != tag['propagate_at_launch']) ):
changed = True
continue
if changed:
connection.create_or_update_tags(asg_tags)
# handle loadbalancers separately because None != []
load_balancers = module.params.get('load_balancers') or []
if load_balancers and as_group.load_balancers != load_balancers:
changed = True
as_group.load_balancers = module.params.get('load_balancers')
try:
if changed:
as_group.update()
asg_properties = get_properties(as_group)
return(changed, asg_properties)
except BotoServerError, e:
module.fail_json(msg=str(e))
result = as_groups[0]
module.exit_json(changed=changed, name=result.name,
autoscaling_group_arn=result.autoscaling_group_arn,
availability_zones=result.availability_zones,
created_time=str(result.created_time),
default_cooldown=result.default_cooldown,
health_check_period=result.health_check_period,
health_check_type=result.health_check_type,
instance_id=result.instance_id,
instances=[instance.instance_id for instance in result.instances],
launch_config_name=result.launch_config_name,
load_balancers=result.load_balancers,
min_size=result.min_size, max_size=result.max_size,
placement_group=result.placement_group,
tags=result.tags,
termination_policies=result.termination_policies,
vpc_zone_identifier=result.vpc_zone_identifier)
def delete_autoscaling_group(connection, module):
group_name = module.params.get('name')
groups = connection.get_all_groups(names=[group_name])
if groups:
group = groups[0]
group.max_size = 0
group.min_size = 0
group.desired_capacity = 0
group.update()
instances = True
while instances:
tmp_groups = connection.get_all_groups(names=[group_name])
if tmp_groups:
tmp_group = tmp_groups[0]
if not tmp_group.instances:
instances = False
time.sleep(10)
group.delete()
changed=True
return changed
else:
changed=False
return changed
def get_chunks(l, n):
for i in xrange(0, len(l), n):
yield l[i:i+n]
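# Example (editor's illustration): batches preserve order; the last may be short.
#   list(get_chunks(['i-1', 'i-2', 'i-3'], 2)) -> [['i-1', 'i-2'], ['i-3']]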
def replace(connection, module):
batch_size = module.params.get('replace_batch_size')
wait_timeout = module.params.get('wait_timeout')
group_name = module.params.get('group_name')
max_size = module.params.get('max_size')
min_size = module.params.get('min_size')
desired_capacity = module.params.get('desired_capacity')
replace_instances = module.params.get('replace_instances')
# wait for instance list to be populated on a newly provisioned ASG
instance_wait = time.time() + 30
while instance_wait > time.time():
as_group = connection.get_all_groups(names=[group_name])[0]
props = get_properties(as_group)
if props.has_key('instances'):
instances = props['instances']
break
time.sleep(10)
if instance_wait <= time.time():
# waiting took too long
module.fail_json(msg = "Waited too for instances to appear. %s" % time.asctime())
# determine if we need to continue
replaceable = 0
if replace_instances:
instances = replace_instances
for k in props['instance_facts'].keys():
if k in instances:
if props['instance_facts'][k]['launch_config_name'] != props['launch_config_name']:
replaceable += 1
if replaceable == 0:
changed = False
return(changed, props)
# set temporary settings and wait for them to be reached
as_group.max_size = max_size + batch_size
as_group.min_size = min_size + batch_size
as_group.desired_capacity = desired_capacity + batch_size
as_group.update()
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time() and min_size + batch_size > props['viable_instances']:
time.sleep(10)
as_groups = connection.get_all_groups(names=[group_name])
as_group = as_groups[0]
props = get_properties(as_group)
if wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg = "Waited too for instances to appear. %s" % time.asctime())
instances = props['instances']
if replace_instances:
instances = replace_instances
for i in get_chunks(instances, batch_size):
replace_batch(connection, module, i)
# return settings to normal
as_group = connection.get_all_groups(names=[group_name])[0]
as_group.max_size = max_size
as_group.min_size = min_size
as_group.desired_capacity = desired_capacity
as_group.update()
as_group = connection.get_all_groups(names=[group_name])[0]
asg_properties = get_properties(as_group)
changed=True
return(changed, asg_properties)
def replace_batch(connection, module, replace_instances):
group_name = module.params.get('group_name')
wait_timeout = int(module.params.get('wait_timeout'))
lc_check = module.params.get('lc_check')
as_group = connection.get_all_groups(names=[group_name])[0]
props = get_properties(as_group)
# check to make sure instances given are actually in the given ASG
# and they have a non-current launch config
old_instances = []
instances = ( inst_id for inst_id in replace_instances if inst_id in props['instances'])
if lc_check:
for i in instances:
if props['instance_facts'][i]['launch_config_name'] != props['launch_config_name']:
old_instances.append(i)
else:
old_instances = instances
# set all instances given to unhealthy
for instance_id in old_instances:
connection.set_instance_health(instance_id,'Unhealthy')
# we wait to make sure the machines we marked as Unhealthy are
# no longer in the list
count = 1
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time() and count > 0:
count = 0
as_group = connection.get_all_groups(names=[group_name])[0]
props = get_properties(as_group)
instance_facts = props['instance_facts']
instances = ( i for i in instance_facts if i in old_instances)
for i in instances:
if ( instance_facts[i]['lifecycle_state'] == 'Terminating'
or instance_facts[i]['health_status'] == 'Unhealthy' ):
count += 1
time.sleep(10)
if wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg = "Waited too long for old instances to terminate. %s" % time.asctime())
# make sure we have the latest stats after that last loop.
as_group = connection.get_all_groups(names=[group_name])[0]
props = get_properties(as_group)
# now we make sure that we have enough instances in a viable state
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time() and props['min_size'] > props['viable_instances']:
time.sleep(10)
as_groups = connection.get_all_groups(names=[group_name])
as_group = as_groups[0]
props = get_properties(as_group)
if wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg = "Waited too long for new instances to become viable. %s" % time.asctime())
# collect final stats info
as_group = connection.get_all_groups(names=[group_name])[0]
asg_properties = get_properties(as_group)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name=dict(required=True, type='str'),
load_balancers=dict(type='list'),
availability_zones=dict(type='list'),
launch_config_name=dict(type='str'),
min_size=dict(type='int'),
max_size=dict(type='int'),
desired_capacity=dict(type='int'),
vpc_zone_identifier=dict(type='str'),
replace_batch_size=dict(type='int', default=1),
replace_all_instances=dict(type='bool', default=False),
replace_instances=dict(type='list', default=[]),
lc_check=dict(type='bool', default=True),
wait_timeout=dict(type='int', default=300),
state=dict(default='present', choices=['present', 'absent']),
tags=dict(type='list', default=[]),
health_check_period=dict(type='int', default=300),
health_check_type=dict(default='EC2', choices=['EC2', 'ELB']),
)
)
module = AnsibleModule(argument_spec=argument_spec)
state = module.params.get('state')
replace_instances = module.params.get('replace_instances')
replace_all_instances = module.params.get('replace_all_instances')
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
try:
connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params)
if not connection:
module.fail_json(msg="failed to connect to AWS for the given region: %s" % str(region))
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg=str(e))
changed = create_changed = replace_changed = False
if replace_all_instances and replace_instances:
module.fail_json(msg="You can't use replace_instances and replace_all_instances in the same task.")
if state == 'present':
create_changed, asg_properties=create_autoscaling_group(connection, module)
if replace_all_instances or replace_instances:
replace_changed, asg_properties=replace(connection, module)
elif state == 'absent':
changed = delete_autoscaling_group(connection, module)
module.exit_json( changed = changed )
if create_changed or replace_changed:
changed = True
module.exit_json( changed = changed, **asg_properties )
main()

@ -1,305 +0,0 @@
#!/usr/bin/python
DOCUMENTATION = '''
---
module: ec2_eip
short_description: associate an EC2 elastic IP with an instance.
description:
- This module associates AWS EC2 elastic IP addresses with instances
version_added: 1.4
options:
instance_id:
description:
- The EC2 instance id
required: false
public_ip:
description:
- The elastic IP address to associate with the instance.
- If absent, allocate a new address
required: false
state:
description:
- If present, associate the IP with the instance.
- If absent, disassociate the IP from the instance.
required: false
choices: ['present', 'absent']
default: present
region:
description:
- the EC2 region to use
required: false
default: null
aliases: [ ec2_region ]
in_vpc:
description:
- allocate an EIP inside a VPC or not
required: false
default: false
version_added: "1.4"
reuse_existing_ip_allowed:
description:
- Reuse an EIP that is not associated to an instance (when available), instead of allocating a new one.
required: false
default: false
version_added: "1.6"
wait_timeout:
description:
- how long to wait in seconds for newly provisioned EIPs to become available
default: 300
version_added: "1.7"
extends_documentation_fragment: aws
author: Lorin Hochstein <lorin@nimbisservices.com>
notes:
- This module will return C(public_ip) on success, which will contain the
public IP address associated with the instance.
- There may be a delay between the time the Elastic IP is assigned and when
the cloud instance is reachable via the new address. Use wait_for and pause
to delay further playbook execution until the instance is reachable, if
necessary.
'''
EXAMPLES = '''
- name: associate an elastic IP with an instance
ec2_eip: instance_id=i-1212f003 ip=93.184.216.119
- name: disassociate an elastic IP from an instance
ec2_eip: instance_id=i-1212f003 ip=93.184.216.119 state=absent
- name: allocate a new elastic IP and associate it with an instance
ec2_eip: instance_id=i-1212f003
- name: allocate a new elastic IP without associating it to anything
ec2_eip:
register: eip
- name: output the IP
debug: msg="Allocated IP is {{ eip.public_ip }}"
- name: provision new instances with ec2
ec2: keypair=mykey instance_type=c1.medium image=emi-40603AD1 wait=yes group=webserver count=3
register: ec2
- name: associate new elastic IPs with each of the instances
ec2_eip: "instance_id={{ item }}"
with_items: ec2.instance_ids
- name: allocate a new elastic IP inside a VPC in us-west-2
ec2_eip: region=us-west-2 in_vpc=yes
register: eip
- name: output the IP
debug: msg="Allocated IP inside a VPC is {{ eip.public_ip }}"
'''
try:
import boto.ec2
except ImportError:
boto_found = False
else:
boto_found = True
wait_timeout = 0
def associate_ip_and_instance(ec2, address, instance_id, module):
if ip_is_associated_with_instance(ec2, address.public_ip, instance_id, module):
module.exit_json(changed=False, public_ip=address.public_ip)
# If we're in check mode, nothing else to do
if module.check_mode:
module.exit_json(changed=True)
try:
if address.domain == "vpc":
res = ec2.associate_address(instance_id, allocation_id=address.allocation_id)
else:
res = ec2.associate_address(instance_id, public_ip=address.public_ip)
except boto.exception.EC2ResponseError, e:
module.fail_json(msg=str(e))
if res:
module.exit_json(changed=True, public_ip=address.public_ip)
else:
module.fail_json(msg="association failed")
def disassociate_ip_and_instance(ec2, address, instance_id, module):
if not ip_is_associated_with_instance(ec2, address.public_ip, instance_id, module):
module.exit_json(changed=False, public_ip=address.public_ip)
# If we're in check mode, nothing else to do
if module.check_mode:
module.exit_json(changed=True)
try:
if address.domain == "vpc":
res = ec2.disassociate_address(association_id=address.association_id)
else:
res = ec2.disassociate_address(public_ip=address.public_ip)
except boto.exception.EC2ResponseError, e:
module.fail_json(msg=str(e))
if res:
module.exit_json(changed=True)
else:
module.fail_json(msg="disassociation failed")
def find_address(ec2, public_ip, module):
""" Find an existing Elastic IP address """
if wait_timeout != 0:
timeout = time.time() + wait_timeout
while timeout > time.time():
try:
addresses = ec2.get_all_addresses([public_ip])
break
except boto.exception.EC2ResponseError, e:
if "Address '%s' not found." % public_ip in e.message :
pass
else:
module.fail_json(msg=str(e.message))
time.sleep(5)
if timeout <= time.time():
module.fail_json(msg = "wait for EIPs timeout on %s" % time.asctime())
else:
try:
addresses = ec2.get_all_addresses([public_ip])
except boto.exception.EC2ResponseError, e:
module.fail_json(msg=str(e.message))
return addresses[0]
def ip_is_associated_with_instance(ec2, public_ip, instance_id, module):
""" Check if the elastic IP is currently associated with the instance """
address = find_address(ec2, public_ip, module)
if address:
return address.instance_id == instance_id
else:
return False
def allocate_address(ec2, domain, module, reuse_existing_ip_allowed):
""" Allocate a new elastic IP address (when needed) and return it """
# If we're in check mode, nothing else to do
if module.check_mode:
module.exit_json(changed=True)
if reuse_existing_ip_allowed:
if domain:
domain_filter = { 'domain' : domain }
else:
domain_filter = { 'domain' : 'standard' }
all_addresses = ec2.get_all_addresses(filters=domain_filter)
unassociated_addresses = filter(lambda a: a.instance_id == "", all_addresses)
if unassociated_addresses:
address = unassociated_addresses[0]
else:
address = ec2.allocate_address(domain=domain)
else:
address = ec2.allocate_address(domain=domain)
return address
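# Editor's note: when reuse is allowed, addresses are filtered by domain
# ('vpc' when in_vpc, otherwise 'standard') and the first one with an empty
# instance_id -- i.e. not associated to anything -- is reused; a new address
# is allocated only when no free one exists.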
def release_address(ec2, public_ip, module):
""" Release a previously allocated elastic IP address """
address = find_address(ec2, public_ip, module)
# If we're in check mode, nothing else to do
if module.check_mode:
module.exit_json(changed=True)
res = address.release()
if res:
module.exit_json(changed=True)
else:
module.fail_json(msg="release failed")
def find_instance(ec2, instance_id, module):
""" Attempt to find the EC2 instance and return it """
try:
reservations = ec2.get_all_reservations(instance_ids=[instance_id])
except boto.exception.EC2ResponseError, e:
module.fail_json(msg=str(e))
if len(reservations) == 1:
instances = reservations[0].instances
if len(instances) == 1:
return instances[0]
module.fail_json(msg="could not find instance" + instance_id)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
instance_id = dict(required=False),
public_ip = dict(required=False, aliases= ['ip']),
state = dict(required=False, default='present',
choices=['present', 'absent']),
in_vpc = dict(required=False, type='bool', default=False),
reuse_existing_ip_allowed = dict(required=False, type='bool', default=False),
wait_timeout = dict(default=300),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
if not boto_found:
module.fail_json(msg="boto is required")
ec2 = ec2_connect(module)
instance_id = module.params.get('instance_id')
public_ip = module.params.get('public_ip')
state = module.params.get('state')
in_vpc = module.params.get('in_vpc')
domain = "vpc" if in_vpc else None
reuse_existing_ip_allowed = module.params.get('reuse_existing_ip_allowed')
new_eip_timeout = int(module.params.get('wait_timeout'))
if state == 'present':
# Allocate an EIP and exit
if not instance_id and not public_ip:
address = allocate_address(ec2, domain, module, reuse_existing_ip_allowed)
module.exit_json(changed=True, public_ip=address.public_ip)
# Return the EIP object since we've been given a public IP
if public_ip:
address = find_address(ec2, public_ip, module)
# Allocate an IP for instance since no public_ip was provided
if instance_id and not public_ip:
instance = find_instance(ec2, instance_id, module)
if instance.vpc_id:
domain = "vpc"
address = allocate_address(ec2, domain, module, reuse_existing_ip_allowed)
# Override the timeout since this is a newly provisioned IP
global wait_timeout
wait_timeout = new_eip_timeout
# Associate address object (provided or allocated) with instance
associate_ip_and_instance(ec2, address, instance_id, module)
else:
#disassociating address from instance
if instance_id:
address = find_address(ec2, public_ip, module)
disassociate_ip_and_instance(ec2, address, instance_id, module)
#releasing address
else:
release_address(ec2, public_ip, module)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()

@ -1,339 +0,0 @@
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
---
module: ec2_elb
short_description: De-registers or registers instances from EC2 ELBs
description:
- This module de-registers or registers an AWS EC2 instance from the ELBs
that it belongs to.
- Returns fact "ec2_elbs" which is a list of elbs attached to the instance
if state=absent is passed as an argument.
- Will be marked changed only if there are ELBs found to operate on.
version_added: "1.2"
author: John Jarvis
options:
state:
description:
- register or deregister the instance
required: true
choices: ['present', 'absent']
instance_id:
description:
- EC2 Instance ID
required: true
ec2_elbs:
description:
- List of ELB names, required for registration. The ec2_elbs fact should be used if there was a previous de-register.
required: false
default: None
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: false
aliases: ['aws_region', 'ec2_region']
enable_availability_zone:
description:
- Whether to enable the availability zone of the instance on the target ELB if the availability zone has not already
been enabled. If set to no, the task will fail if the availability zone is not enabled on the ELB.
required: false
default: yes
choices: [ "yes", "no" ]
wait:
description:
- Wait for instance registration or deregistration to complete successfully before returning.
required: false
default: yes
choices: [ "yes", "no" ]
validate_certs:
description:
- When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
required: false
default: "yes"
choices: ["yes", "no"]
aliases: []
version_added: "1.5"
wait_timeout:
description:
- Number of seconds to wait for an instance to change state. If 0 then this module may return an error if a transient error occurs. If non-zero then any transient errors are ignored until the timeout is reached. Ignored when wait=no.
required: false
default: 0
version_added: "1.6"
extends_documentation_fragment: aws
"""
EXAMPLES = """
# basic pre_task and post_task example
pre_tasks:
- name: Gathering ec2 facts
ec2_facts:
- name: Instance De-register
local_action: ec2_elb
args:
instance_id: "{{ ansible_ec2_instance_id }}"
state: 'absent'
roles:
- myrole
post_tasks:
- name: Instance Register
local_action: ec2_elb
args:
instance_id: "{{ ansible_ec2_instance_id }}"
ec2_elbs: "{{ item }}"
state: 'present'
with_items: ec2_elbs
"""
import time
import sys
import os
try:
import boto
import boto.ec2
import boto.ec2.elb
from boto.regioninfo import RegionInfo
except ImportError:
print "failed=True msg='boto required for this module'"
sys.exit(1)
class ElbManager:
"""Handles EC2 instance ELB registration and de-registration"""
def __init__(self, module, instance_id=None, ec2_elbs=None,
region=None, **aws_connect_params):
self.module = module
self.instance_id = instance_id
self.region = region
self.aws_connect_params = aws_connect_params
self.lbs = self._get_instance_lbs(ec2_elbs)
self.changed = False
def deregister(self, wait, timeout):
"""De-register the instance from all ELBs and wait for the ELB
to report it out-of-service"""
for lb in self.lbs:
initial_state = self._get_instance_health(lb)
if initial_state is None:
# The instance isn't registered with this ELB so just
# return unchanged
return
lb.deregister_instances([self.instance_id])
# The ELB is changing state in some way. Either an instance that's
# InService is moving to OutOfService, or an instance that's
# already OutOfService is being deregistered.
self.changed = True
if wait:
self._await_elb_instance_state(lb, 'OutOfService', initial_state, timeout)
def register(self, wait, enable_availability_zone, timeout):
"""Register the instance for all ELBs and wait for the ELB
to report the instance in-service"""
for lb in self.lbs:
initial_state = self._get_instance_health(lb)
if enable_availability_zone:
self._enable_availability_zone(lb)
lb.register_instances([self.instance_id])
if wait:
self._await_elb_instance_state(lb, 'InService', initial_state, timeout)
else:
# We cannot assume no change was made if we don't wait
# to find out
self.changed = True
def exists(self, lbtest):
""" Verify that the named ELB actually exists """
found = False
for lb in self.lbs:
if lb.name == lbtest:
found=True
break
return found
def _enable_availability_zone(self, lb):
"""Enable the current instance's availability zone in the provided lb.
Returns True if the zone was enabled or False if no change was made.
lb: load balancer"""
instance = self._get_instance()
if instance.placement in lb.availability_zones:
return False
lb.enable_zones(zones=instance.placement)
# If successful, the new zone will have been added to
# lb.availability_zones
return instance.placement in lb.availability_zones
def _await_elb_instance_state(self, lb, awaited_state, initial_state, timeout):
"""Wait for an ELB to change state
lb: load balancer
awaited_state : state to poll for (string)"""
wait_timeout = time.time() + timeout
while True:
instance_state = self._get_instance_health(lb)
if not instance_state:
msg = ("The instance %s could not be put in service on %s."
" Reason: Invalid Instance")
self.module.fail_json(msg=msg % (self.instance_id, lb))
if instance_state.state == awaited_state:
# Check the current state against the initial state, and only set
# changed if they are different.
if (initial_state is None) or (instance_state.state != initial_state.state):
self.changed = True
break
elif self._is_instance_state_pending(instance_state):
# If it's pending, we'll skip further checks and continue waiting
pass
elif (awaited_state == 'InService'
and instance_state.reason_code == "Instance"
and time.time() >= wait_timeout):
# If the reason_code for the instance being out of service is
# "Instance" this indicates a failure state, e.g. the instance
# has failed a health check or the ELB does not have the
# instance's availability zone enabled. The exact reason why is
# described in InstanceState.description.
msg = ("The instance %s could not be put in service on %s."
" Reason: %s")
self.module.fail_json(msg=msg % (self.instance_id,
lb,
instance_state.description))
time.sleep(1)
def _is_instance_state_pending(self, instance_state):
"""
Determines whether the instance_state is "pending", meaning there is
an operation under way to bring it in service.
"""
# This is messy, because AWS provides no way to distinguish between
# an instance that is OutOfService because it's pending vs. OutOfService
# because it's failing health checks. So we're forced to analyze the
# description, which is likely to be brittle.
return (instance_state and 'pending' in instance_state.description)
def _get_instance_health(self, lb):
"""
Check instance health, should return status object or None under
certain error conditions.
"""
try:
status = lb.get_instance_health([self.instance_id])[0]
except boto.exception.BotoServerError, e:
if e.error_code == 'InvalidInstance':
return None
else:
raise
return status
def _get_instance_lbs(self, ec2_elbs=None):
"""Returns a list of ELBs attached to self.instance_id
ec2_elbs: an optional list of elb names that will be used
for elb lookup instead of returning what elbs
are attached to self.instance_id"""
try:
elb = connect_to_aws(boto.ec2.elb, self.region,
**self.aws_connect_params)
except boto.exception.NoAuthHandlerFound, e:
self.module.fail_json(msg=str(e))
elbs = elb.get_all_load_balancers()
if ec2_elbs:
lbs = sorted(lb for lb in elbs if lb.name in ec2_elbs)
else:
lbs = []
for lb in elbs:
for info in lb.instances:
if self.instance_id == info.id:
lbs.append(lb)
return lbs
def _get_instance(self):
"""Returns a boto.ec2.InstanceObject for self.instance_id"""
try:
ec2 = connect_to_aws(boto.ec2, self.region,
**self.aws_connect_params)
except boto.exception.NoAuthHandlerFound, e:
self.module.fail_json(msg=str(e))
return ec2.get_only_instances(instance_ids=[self.instance_id])[0]
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state={'required': True},
instance_id={'required': True},
ec2_elbs={'default': None, 'required': False, 'type':'list'},
enable_availability_zone={'default': True, 'required': False, 'type': 'bool'},
wait={'required': False, 'default': True, 'type': 'bool'},
wait_timeout={'required': False, 'default': 0, 'type': 'int'}
)
)
module = AnsibleModule(
argument_spec=argument_spec,
)
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if not region:
module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
ec2_elbs = module.params['ec2_elbs']
wait = module.params['wait']
enable_availability_zone = module.params['enable_availability_zone']
timeout = module.params['wait_timeout']
if module.params['state'] == 'present' and not ec2_elbs:
module.fail_json(msg="ELBs are required for registration")
instance_id = module.params['instance_id']
elb_man = ElbManager(module, instance_id, ec2_elbs,
region=region, **aws_connect_params)
if ec2_elbs is not None:
for elb in ec2_elbs:
if not elb_man.exists(elb):
msg="ELB %s does not exist" % elb
module.fail_json(msg=msg)
if module.params['state'] == 'present':
elb_man.register(wait, enable_availability_zone, timeout)
elif module.params['state'] == 'absent':
elb_man.deregister(wait, timeout)
ansible_facts = {'ec2_elbs': [lb.name for lb in elb_man.lbs]}
ec2_facts_result = dict(changed=elb_man.changed, ansible_facts=ansible_facts)
module.exit_json(**ec2_facts_result)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()

@ -1,698 +0,0 @@
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
---
module: ec2_elb_lb
description:
- Returns information about the load balancer.
- Will be marked changed when called only if state is changed.
short_description: Creates or destroys Amazon ELB.
version_added: "1.5"
author: Jim Dalton
options:
state:
description:
- Create or destroy the ELB
required: true
name:
description:
- The name of the ELB
required: true
listeners:
description:
- List of ports/protocols for this ELB to listen on (see example)
required: false
purge_listeners:
description:
- Purge existing listeners on ELB that are not found in listeners
required: false
default: true
zones:
description:
- List of availability zones to enable on this ELB
required: false
purge_zones:
description:
- Purge existing availability zones on ELB that are not found in zones
required: false
default: false
security_group_ids:
description:
- A list of security groups to apply to the elb
required: false
default: None
version_added: "1.6"
health_check:
description:
- An associative array of health check configuration settings (see example)
required: false
default: None
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: false
aliases: ['aws_region', 'ec2_region']
subnets:
description:
- A list of VPC subnets to use when creating ELB. Zones should be empty if using this.
required: false
default: None
aliases: []
version_added: "1.7"
purge_subnets:
description:
- Purge existing subnets on the ELB that are not found in subnets
required: false
default: false
version_added: "1.7"
scheme:
description:
- The scheme to use when creating the ELB. For a private VPC-visible ELB use 'internal'.
required: false
default: 'internet-facing'
version_added: "1.7"
validate_certs:
description:
- When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
required: false
default: "yes"
choices: ["yes", "no"]
aliases: []
version_added: "1.5"
connection_draining_timeout:
description:
- Wait a specified timeout allowing connections to drain before terminating an instance
required: false
aliases: []
version_added: "1.8"
cross_az_load_balancing:
description:
- Distribute load across all configured Availability Zones
required: false
default: "no"
choices: ["yes", "no"]
aliases: []
version_added: "1.8"
extends_documentation_fragment: aws
"""
EXAMPLES = """
# Note: None of these examples set aws_access_key, aws_secret_key, or region.
# It is assumed that their matching environment variables are set.
# Basic provisioning example
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: present
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http # options are http, https, ssl, tcp
load_balancer_port: 80
instance_port: 80
- protocol: https
load_balancer_port: 443
instance_protocol: http # optional, defaults to value of protocol setting
instance_port: 80
# ssl certificate required for https or ssl
ssl_certificate_id: "arn:aws:iam::123456789012:server-certificate/company/servercerts/ProdServerCert"
# Basic VPC provisioning example
- local_action:
module: ec2_elb_lb
name: "test-vpc"
scheme: internal
state: present
subnets:
- subnet-abcd1234
- subnet-1a2b3c4d
listeners:
- protocol: http # options are http, https, ssl, tcp
load_balancer_port: 80
instance_port: 80
# Configure a health check
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: present
zones:
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
health_check:
ping_protocol: http # options are http, https, ssl, tcp
ping_port: 80
ping_path: "/index.html" # not required for tcp or ssl
response_timeout: 5 # seconds
interval: 30 # seconds
unhealthy_threshold: 2
healthy_threshold: 10
# Ensure ELB is gone
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: absent
# Normally, this module will purge any listeners that exist on the ELB
# but aren't specified in the listeners parameter. If purge_listeners is
# false it leaves them alone
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: present
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
purge_listeners: no
# Normally, this module will leave availability zones that are enabled
# on the ELB alone. If purge_zones is true, then any extraneous zones
# will be removed
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: present
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
purge_zones: yes
# Creates an ELB and assigns a list of subnets to it.
- local_action:
module: ec2_elb_lb
state: present
name: 'new-elb'
security_group_ids: 'sg-123456, sg-67890'
region: us-west-2
subnets: 'subnet-123456, subnet-67890'
purge_subnets: yes
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
# Create an ELB with connection draining and cross availability
# zone load balancing
- local_action:
module: ec2_elb_lb
name: "New ELB"
state: present
connection_draining_timeout: 60
cross_az_load_balancing: "yes"
region: us-east-1
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
"""
import sys
import os
try:
import boto
import boto.ec2.elb
import boto.ec2.elb.attributes
from boto.ec2.elb.healthcheck import HealthCheck
from boto.regioninfo import RegionInfo
except ImportError:
print "failed=True msg='boto required for this module'"
sys.exit(1)
class ElbManager(object):
"""Handles ELB creation and destruction"""
def __init__(self, module, name, listeners=None, purge_listeners=None,
zones=None, purge_zones=None, security_group_ids=None,
health_check=None, subnets=None, purge_subnets=None,
scheme="internet-facing", connection_draining_timeout=None,
cross_az_load_balancing=None, region=None, **aws_connect_params):
self.module = module
self.name = name
self.listeners = listeners
self.purge_listeners = purge_listeners
self.zones = zones
self.purge_zones = purge_zones
self.security_group_ids = security_group_ids
self.health_check = health_check
self.subnets = subnets
self.purge_subnets = purge_subnets
self.scheme = scheme
self.connection_draining_timeout = connection_draining_timeout
self.cross_az_load_balancing = cross_az_load_balancing
self.aws_connect_params = aws_connect_params
self.region = region
self.changed = False
self.status = 'gone'
self.elb_conn = self._get_elb_connection()
self.elb = self._get_elb()
def ensure_ok(self):
"""Create the ELB"""
if not self.elb:
# Zones and listeners will be added at creation
self._create_elb()
else:
self._set_zones()
self._set_security_groups()
self._set_elb_listeners()
self._set_subnets()
self._set_health_check()
# boto has introduced support for some ELB attributes in
# different versions, so we check first before trying to
# set them to avoid errors
if self._check_attribute_support('connection_draining'):
self._set_connection_draining_timeout()
if self._check_attribute_support('cross_zone_load_balancing'):
self._set_cross_az_load_balancing()
def ensure_gone(self):
"""Destroy the ELB"""
if self.elb:
self._delete_elb()
def get_info(self):
try:
check_elb = self.elb_conn.get_all_load_balancers(self.name)[0]
except Exception:
check_elb = None
if not check_elb:
info = {
'name': self.name,
'status': self.status
}
else:
info = {
'name': check_elb.name,
'dns_name': check_elb.dns_name,
'zones': check_elb.availability_zones,
'security_group_ids': check_elb.security_groups,
'status': self.status,
'subnets': self.subnets,
'scheme': check_elb.scheme
}
if check_elb.health_check:
info['health_check'] = {
'target': check_elb.health_check.target,
'interval': check_elb.health_check.interval,
'timeout': check_elb.health_check.timeout,
'healthy_threshold': check_elb.health_check.healthy_threshold,
'unhealthy_threshold': check_elb.health_check.unhealthy_threshold,
}
if check_elb.listeners:
info['listeners'] = [l.get_complex_tuple()
for l in check_elb.listeners]
elif self.status == 'created':
# When creating a new ELB, listeners don't show in the
# immediately returned result, so just include the
# ones that were added
info['listeners'] = [self._listener_as_tuple(l)
for l in self.listeners]
else:
info['listeners'] = []
if self._check_attribute_support('connection_draining'):
info['connection_draining_timeout'] = self.elb_conn.get_lb_attribute(self.name, 'ConnectionDraining').timeout
if self._check_attribute_support('cross_zone_load_balancing'):
is_cross_az_lb_enabled = self.elb_conn.get_lb_attribute(self.name, 'CrossZoneLoadBalancing')
if is_cross_az_lb_enabled:
info['cross_az_load_balancing'] = 'yes'
else:
info['cross_az_load_balancing'] = 'no'
return info
def _get_elb(self):
elbs = self.elb_conn.get_all_load_balancers()
for elb in elbs:
if self.name == elb.name:
self.status = 'ok'
return elb
def _get_elb_connection(self):
try:
return connect_to_aws(boto.ec2.elb, self.region,
**self.aws_connect_params)
except boto.exception.NoAuthHandlerFound, e:
self.module.fail_json(msg=str(e))
def _delete_elb(self):
# True if succeeds, exception raised if not
result = self.elb_conn.delete_load_balancer(name=self.name)
if result:
self.changed = True
self.status = 'deleted'
def _create_elb(self):
listeners = [self._listener_as_tuple(l) for l in self.listeners]
self.elb = self.elb_conn.create_load_balancer(name=self.name,
zones=self.zones,
security_groups=self.security_group_ids,
complex_listeners=listeners,
subnets=self.subnets,
scheme=self.scheme)
if self.elb:
self.changed = True
self.status = 'created'
def _create_elb_listeners(self, listeners):
"""Takes a list of listener tuples and creates them"""
# True if succeeds, exception raised if not
self.changed = self.elb_conn.create_load_balancer_listeners(self.name,
complex_listeners=listeners)
def _delete_elb_listeners(self, listeners):
"""Takes a list of listener tuples and deletes them from the elb"""
ports = [l[0] for l in listeners]
# True if succeeds, exception raised if not
self.changed = self.elb_conn.delete_load_balancer_listeners(self.name,
ports)
def _set_elb_listeners(self):
"""
Creates listeners specified by self.listeners; overwrites existing
listeners on these ports; removes extraneous listeners
"""
listeners_to_add = []
listeners_to_remove = []
listeners_to_keep = []
# Check for any listeners we need to create or overwrite
for listener in self.listeners:
listener_as_tuple = self._listener_as_tuple(listener)
# First we loop through existing listeners to see if one is
# already specified for this port
existing_listener_found = None
for existing_listener in self.elb.listeners:
# Since ELB allows only one listener on each incoming port, a
# single match on the incoming port is all we're looking for
if existing_listener[0] == listener['load_balancer_port']:
existing_listener_found = existing_listener.get_complex_tuple()
break
if existing_listener_found:
# Does it match exactly?
if listener_as_tuple != existing_listener_found:
# The ports are the same but something else is different,
# so we'll remove the existing one and add the new one
listeners_to_remove.append(existing_listener_found)
listeners_to_add.append(listener_as_tuple)
else:
# We already have this listener, so we're going to keep it
listeners_to_keep.append(existing_listener_found)
else:
# We didn't find an existing listener, so just add the new one
listeners_to_add.append(listener_as_tuple)
# Check for any extraneous listeners we need to remove, if desired
if self.purge_listeners:
for existing_listener in self.elb.listeners:
existing_listener_tuple = existing_listener.get_complex_tuple()
if existing_listener_tuple in listeners_to_remove:
# Already queued for removal
continue
if existing_listener_tuple in listeners_to_keep:
# Keep this one around
continue
# Since we're not already removing it and we don't need to keep
# it, let's get rid of it
listeners_to_remove.append(existing_listener_tuple)
if listeners_to_remove:
self._delete_elb_listeners(listeners_to_remove)
if listeners_to_add:
self._create_elb_listeners(listeners_to_add)
def _listener_as_tuple(self, listener):
"""Formats listener as a 4- or 5-tuples, in the order specified by the
ELB API"""
# N.B. the string manipulations on protocols below (str(), upper()) are to
# ensure the format matches output from the ELB API
listener_list = [
listener['load_balancer_port'],
listener['instance_port'],
str(listener['protocol'].upper()),
]
# Instance protocol is not required by ELB API; it defaults to match
# load balancer protocol. We'll mimic that behavior here
if 'instance_protocol' in listener:
listener_list.append(str(listener['instance_protocol'].upper()))
else:
listener_list.append(str(listener['protocol'].upper()))
if 'ssl_certificate_id' in listener:
listener_list.append(str(listener['ssl_certificate_id']))
return tuple(listener_list)
def _enable_zones(self, zones):
try:
self.elb.enable_zones(zones)
except boto.exception.BotoServerError, e:
if "Invalid Availability Zone" in e.error_message:
self.module.fail_json(msg=e.error_message)
else:
self.module.fail_json(msg="an unknown server error occurred, please try again later")
self.changed = True
def _disable_zones(self, zones):
try:
self.elb.disable_zones(zones)
except boto.exception.BotoServerError, e:
if "Invalid Availability Zone" in e.error_message:
self.module.fail_json(msg=e.error_message)
else:
self.module.fail_json(msg="an unknown server error occurred, please try again later")
self.changed = True
def _attach_subnets(self, subnets):
self.elb_conn.attach_lb_to_subnets(self.name, subnets)
self.changed = True
def _detach_subnets(self, subnets):
self.elb_conn.detach_lb_from_subnets(self.name, subnets)
self.changed = True
def _set_subnets(self):
"""Determine which subnets need to be attached or detached on the ELB"""
if self.subnets:
if self.purge_subnets:
subnets_to_detach = list(set(self.elb.subnets) - set(self.subnets))
subnets_to_attach = list(set(self.subnets) - set(self.elb.subnets))
else:
subnets_to_detach = None
subnets_to_attach = list(set(self.subnets) - set(self.elb.subnets))
if subnets_to_attach:
self._attach_subnets(subnets_to_attach)
if subnets_to_detach:
self._detach_subnets(subnets_to_detach)
def _set_zones(self):
"""Determine which zones need to be enabled or disabled on the ELB"""
if self.zones:
if self.purge_zones:
zones_to_disable = list(set(self.elb.availability_zones) -
set(self.zones))
zones_to_enable = list(set(self.zones) -
set(self.elb.availability_zones))
else:
zones_to_disable = None
zones_to_enable = list(set(self.zones) -
set(self.elb.availability_zones))
if zones_to_enable:
self._enable_zones(zones_to_enable)
# N.B. This must come second, in case it would have removed all zones
if zones_to_disable:
self._disable_zones(zones_to_disable)
def _set_security_groups(self):
if self.security_group_ids is not None and set(self.elb.security_groups) != set(self.security_group_ids):
self.elb_conn.apply_security_groups_to_lb(self.name, self.security_group_ids)
self.changed = True
def _set_health_check(self):
"""Set health check values on ELB as needed"""
if self.health_check:
# This just makes it easier to compare each of the attributes
# and look for changes. Keys are attributes of the current
# health_check; values are desired values of new health_check
health_check_config = {
"target": self._get_health_check_target(),
"timeout": self.health_check['response_timeout'],
"interval": self.health_check['interval'],
"unhealthy_threshold": self.health_check['unhealthy_threshold'],
"healthy_threshold": self.health_check['healthy_threshold'],
}
update_health_check = False
# The health_check attribute is *not* set on newly created
# ELBs! So we have to create our own.
if not self.elb.health_check:
self.elb.health_check = HealthCheck()
for attr, desired_value in health_check_config.iteritems():
if getattr(self.elb.health_check, attr) != desired_value:
setattr(self.elb.health_check, attr, desired_value)
update_health_check = True
if update_health_check:
self.elb.configure_health_check(self.elb.health_check)
self.changed = True
def _check_attribute_support(self, attr):
return hasattr(boto.ec2.elb.attributes.LbAttributes(), attr)
def _set_cross_az_load_balancing(self):
attributes = self.elb.get_attributes()
if self.cross_az_load_balancing:
attributes.cross_zone_load_balancing.enabled = True
else:
attributes.cross_zone_load_balancing.enabled = False
self.elb_conn.modify_lb_attribute(self.name, 'CrossZoneLoadBalancing',
attributes.cross_zone_load_balancing.enabled)
def _set_connection_draining_timeout(self):
attributes = self.elb.get_attributes()
if self.connection_draining_timeout is not None:
attributes.connection_draining.enabled = True
attributes.connection_draining.timeout = self.connection_draining_timeout
self.elb_conn.modify_lb_attribute(self.name, 'ConnectionDraining', attributes.connection_draining)
else:
attributes.connection_draining.enabled = False
self.elb_conn.modify_lb_attribute(self.name, 'ConnectionDraining', attributes.connection_draining)
def _get_health_check_target(self):
"""Compose target string from healthcheck parameters"""
protocol = self.health_check['ping_protocol'].upper()
path = ""
if protocol in ['HTTP', 'HTTPS'] and 'ping_path' in self.health_check:
path = self.health_check['ping_path']
return "%s:%s%s" % (protocol, self.health_check['ping_port'], path)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state={'required': True, 'choices': ['present', 'absent']},
name={'required': True},
listeners={'default': None, 'required': False, 'type': 'list'},
purge_listeners={'default': True, 'required': False, 'type': 'bool'},
zones={'default': None, 'required': False, 'type': 'list'},
purge_zones={'default': False, 'required': False, 'type': 'bool'},
security_group_ids={'default': None, 'required': False, 'type': 'list'},
health_check={'default': None, 'required': False, 'type': 'dict'},
subnets={'default': None, 'required': False, 'type': 'list'},
purge_subnets={'default': False, 'required': False, 'type': 'bool'},
scheme={'default': 'internet-facing', 'required': False},
connection_draining_timeout={'default': None, 'required': False},
cross_az_load_balancing={'default': None, 'required': False}
)
)
module = AnsibleModule(
argument_spec=argument_spec,
)
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if not region:
module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
name = module.params['name']
state = module.params['state']
listeners = module.params['listeners']
purge_listeners = module.params['purge_listeners']
zones = module.params['zones']
purge_zones = module.params['purge_zones']
security_group_ids = module.params['security_group_ids']
health_check = module.params['health_check']
subnets = module.params['subnets']
purge_subnets = module.params['purge_subnets']
scheme = module.params['scheme']
connection_draining_timeout = module.params['connection_draining_timeout']
cross_az_load_balancing = module.params['cross_az_load_balancing']
if state == 'present' and not listeners:
module.fail_json(msg="At least one port is required for ELB creation")
if state == 'present' and not (zones or subnets):
module.fail_json(msg="At least one availability zone or subnet is required for ELB creation")
elb_man = ElbManager(module, name, listeners, purge_listeners, zones,
purge_zones, security_group_ids, health_check,
subnets, purge_subnets, scheme,
connection_draining_timeout, cross_az_load_balancing,
region=region, **aws_connect_params)
# check for unsupported attributes for this version of boto
if cross_az_load_balancing and not elb_man._check_attribute_support('cross_zone_load_balancing'):
module.fail_json(msg="You must install boto >= 2.18.0 to use the cross_az_load_balancing attribute")
if connection_draining_timeout and not elb_man._check_attribute_support('connection_draining'):
module.fail_json(msg="You must install boto >= 2.28.0 to use the connection_draining_timeout attribute")
if state == 'present':
elb_man.ensure_ok()
elif state == 'absent':
elb_man.ensure_gone()
ansible_facts = {'ec2_elb': 'info'}
ec2_facts_result = dict(changed=elb_man.changed,
elb=elb_man.get_info(),
ansible_facts=ansible_facts)
module.exit_json(**ec2_facts_result)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()

@ -1,182 +0,0 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_facts
short_description: Gathers facts about remote hosts within ec2 (aws)
version_added: "1.0"
options:
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
version_added: 1.5.1
description:
- This module fetches data from the metadata servers in ec2 (aws) as per
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html.
The module must be called from within the EC2 instance itself.
The Eucalyptus cloud provides a similar service, so this module should
work with that cloud provider as well.
notes:
- Parameters to filter on ec2_facts may be added later.
author: "Silviu Dicu <silviudicu@gmail.com>"
'''
EXAMPLES = '''
# Conditional example
- name: Gather facts
action: ec2_facts
- name: Conditional
action: debug msg="This instance is a t1.micro"
when: ansible_ec2_instance_type == "t1.micro"
'''
import socket
import re
socket.setdefaulttimeout(5)
class Ec2Metadata(object):
ec2_metadata_uri = 'http://169.254.169.254/latest/meta-data/'
ec2_sshdata_uri = 'http://169.254.169.254/latest/meta-data/public-keys/0/openssh-key'
ec2_userdata_uri = 'http://169.254.169.254/latest/user-data/'
AWS_REGIONS = ('ap-northeast-1',
'ap-southeast-1',
'ap-southeast-2',
'eu-west-1',
'sa-east-1',
'us-east-1',
'us-west-1',
'us-west-2',
'us-gov-west-1'
)
def __init__(self, module, ec2_metadata_uri=None, ec2_sshdata_uri=None, ec2_userdata_uri=None):
self.module = module
self.uri_meta = ec2_metadata_uri or self.ec2_metadata_uri
self.uri_user = ec2_userdata_uri or self.ec2_userdata_uri
self.uri_ssh = ec2_sshdata_uri or self.ec2_sshdata_uri
self._data = {}
self._prefix = 'ansible_ec2_%s'
def _fetch(self, url):
(response, info) = fetch_url(self.module, url, force=True)
if response:
data = response.read()
else:
data = None
return data
def _mangle_fields(self, fields, uri, filter_patterns=['public-keys-0']):
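# Turn each metadata URI into an ansible_ec2_* fact name: strip the base
# URI, join nested path segments with '-', then drop any keys matching a
# filter pattern (public-keys-0 is fetched separately as 'public-key').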
new_fields = {}
for key, value in fields.iteritems():
split_fields = key[len(uri):].split('/')
if len(split_fields) > 1 and split_fields[1]:
new_key = "-".join(split_fields)
new_fields[self._prefix % new_key] = value
else:
new_key = "".join(split_fields)
new_fields[self._prefix % new_key] = value
for pattern in filter_patterns:
for key in new_fields.keys():
match = re.search(pattern, key)
if match:
new_fields.pop(key)
return new_fields
def fetch(self, uri, recurse=True):
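# Walk the metadata tree depth-first: entries ending in '/' are directories
# to recurse into; every leaf is fetched once and stored under its full URI
# (the security-groups list is flattened into a comma-separated string).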
raw_subfields = self._fetch(uri)
if not raw_subfields:
return
subfields = raw_subfields.split('\n')
for field in subfields:
if field.endswith('/') and recurse:
self.fetch(uri + field)
if uri.endswith('/'):
new_uri = uri + field
else:
new_uri = uri + '/' + field
if new_uri not in self._data and not new_uri.endswith('/'):
content = self._fetch(new_uri)
if field == 'security-groups':
sg_fields = ",".join(content.split('\n'))
self._data['%s' % (new_uri)] = sg_fields
else:
self._data['%s' % (new_uri)] = content
def fix_invalid_varnames(self, data):
"""Change ':'' and '-' to '_' to ensure valid template variable names"""
for (key, value) in data.items():
if ':' in key or '-' in key:
newkey = key.replace(':','_').replace('-','_')
del data[key]
data[newkey] = value
def add_ec2_region(self, data):
"""Use the 'ansible_ec2_placement_availability_zone' key/value
pair to add 'ansible_ec2_placement_region' key/value pair with
the EC2 region name.
"""
# Only add a 'ansible_ec2_placement_region' key if the
# 'ansible_ec2_placement_availability_zone' exists.
zone = data.get('ansible_ec2_placement_availability_zone')
if zone is not None:
# Use the zone name as the region name unless the zone
# name starts with a known AWS region name.
region = zone
for r in self.AWS_REGIONS:
if zone.startswith(r):
region = r
break
data['ansible_ec2_placement_region'] = region
def run(self):
self.fetch(self.uri_meta) # populate _data
data = self._mangle_fields(self._data, self.uri_meta)
data[self._prefix % 'user-data'] = self._fetch(self.uri_user)
data[self._prefix % 'public-key'] = self._fetch(self.uri_ssh)
self.fix_invalid_varnames(data)
self.add_ec2_region(data)
return data
def main():
argument_spec = url_argument_spec()
module = AnsibleModule(
argument_spec = argument_spec,
supports_check_mode = True,
)
ec2_facts = Ec2Metadata(module).run()
ec2_facts_result = dict(changed=False, ansible_facts=ec2_facts)
module.exit_json(**ec2_facts_result)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
main()

@ -1,386 +0,0 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
DOCUMENTATION = '''
---
module: ec2_group
version_added: "1.3"
short_description: maintain an ec2 VPC security group.
description:
- maintains ec2 security groups. This module has a dependency on python-boto >= 2.5
options:
name:
description:
- Name of the security group.
required: true
description:
description:
- Description of the security group.
required: true
vpc_id:
description:
- ID of the VPC to create the group in.
required: false
rules:
description:
- List of firewall inbound rules to enforce in this group (see example).
required: false
rules_egress:
description:
- List of firewall outbound rules to enforce in this group (see example).
required: false
version_added: "1.6"
region:
description:
- the EC2 region to use
required: false
default: null
aliases: []
state:
version_added: "1.4"
description:
- Create or delete a security group
required: false
default: 'present'
choices: [ "present", "absent" ]
aliases: []
purge_rules:
version_added: "1.8"
description:
- Purge existing rules on security group that are not found in rules
required: false
default: 'true'
aliases: []
purge_rules_egress:
version_added: "1.8"
description:
- Purge existing rules_egress on security group that are not found in rules_egress
required: false
default: 'true'
aliases: []
extends_documentation_fragment: aws
notes:
- If a rule declares a group_name and that group doesn't exist, it will be
automatically created. In that case, group_desc should be provided as well.
The module will refuse to create a depended-on group without a description.
'''
EXAMPLES = '''
- name: example ec2 group
local_action:
module: ec2_group
name: example
description: an example EC2 group
vpc_id: 12345
region: eu-west-1
aws_secret_key: SECRET
aws_access_key: ACCESS
rules:
- proto: tcp
from_port: 80
to_port: 80
cidr_ip: 0.0.0.0/0
- proto: tcp
from_port: 22
to_port: 22
cidr_ip: 10.0.0.0/8
- proto: udp
from_port: 10050
to_port: 10050
cidr_ip: 10.0.0.0/8
- proto: udp
from_port: 10051
to_port: 10051
group_id: sg-12345678
- proto: all
# the containing group name may be specified here
group_name: example
rules_egress:
- proto: tcp
from_port: 80
to_port: 80
group_name: example-other
# description to use if example-other needs to be created
group_desc: other example EC2 group
'''
import sys
import time
try:
import boto.ec2
except ImportError:
print "failed=True msg='boto required for this module'"
sys.exit(1)
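# Rules are flattened into "prefix-proto-from_port-to_port-group_id-cidr_ip"
# lookup keys so desired and existing rules can be matched (and leftovers
# purged) by simple dict membership.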
def addRulesToLookup(rules, prefix, rules_dict):
for rule in rules:
for grant in rule.grants:
rules_dict["%s-%s-%s-%s-%s-%s" % (prefix, rule.ip_protocol, rule.from_port, rule.to_port,
grant.group_id, grant.cidr_ip)] = rule
def get_target_from_rule(module, ec2, rule, name, group, groups, vpc_id):
"""
Returns a tuple of (group_id, ip, target_group_created) after validating rule params.
rule: Dict describing a rule.
name: Name of the security group being managed.
groups: Dict of all available security groups.
AWS accepts an ip range or a security group as the target of a rule. This
function validates the rule specification and returns either a non-None
group_id or a non-None ip range.
"""
group_id = None
group_name = None
ip = None
target_group_created = False
if 'group_id' in rule and 'cidr_ip' in rule:
module.fail_json(msg="Specify group_id OR cidr_ip, not both")
elif 'group_name' in rule and 'cidr_ip' in rule:
module.fail_json(msg="Specify group_name OR cidr_ip, not both")
elif 'group_id' in rule and 'group_name' in rule:
module.fail_json(msg="Specify group_id OR group_name, not both")
elif 'group_id' in rule:
group_id = rule['group_id']
elif 'group_name' in rule:
group_name = rule['group_name']
if group_name in groups:
group_id = groups[group_name].id
elif group_name == name:
group_id = group.id
groups[group_id] = group
groups[group_name] = group
else:
if not rule.get('group_desc', '').strip():
module.fail_json(msg="group %s will be automatically created by rule %s and no description was provided" % (group_name, rule))
if not module.check_mode:
auto_group = ec2.create_security_group(group_name, rule['group_desc'], vpc_id=vpc_id)
group_id = auto_group.id
groups[group_id] = auto_group
groups[group_name] = auto_group
target_group_created = True
elif 'cidr_ip' in rule:
ip = rule['cidr_ip']
return group_id, ip, target_group_created
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
name=dict(required=True),
description=dict(required=True),
vpc_id=dict(),
rules=dict(),
rules_egress=dict(),
state = dict(default='present', choices=['present', 'absent']),
purge_rules=dict(default=True, required=False, type='bool'),
purge_rules_egress=dict(default=True, required=False, type='bool'),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
name = module.params['name']
description = module.params['description']
vpc_id = module.params['vpc_id']
rules = module.params['rules']
rules_egress = module.params['rules_egress']
state = module.params.get('state')
purge_rules = module.params['purge_rules']
purge_rules_egress = module.params['purge_rules_egress']
changed = False
ec2 = ec2_connect(module)
# find the group if present
group = None
groups = {}
for curGroup in ec2.get_all_security_groups():
groups[curGroup.id] = curGroup
groups[curGroup.name] = curGroup
if curGroup.name == name and (vpc_id is None or curGroup.vpc_id == vpc_id):
group = curGroup
# Ensure requested group is absent
if state == 'absent':
if group:
'''found a match, delete it'''
try:
group.delete()
except Exception, e:
module.fail_json(msg="Unable to delete security group '%s' - %s" % (group, e))
else:
group = None
changed = True
else:
'''no match found, no changes required'''
# Ensure requested group is present
elif state == 'present':
if group:
'''existing group found'''
# check the group parameters are correct
group_in_use = False
rs = ec2.get_all_instances()
for r in rs:
for i in r.instances:
group_in_use |= reduce(lambda x, y: x | (y.name == name), i.groups, False)
if group.description != description:
if group_in_use:
module.fail_json(msg="Group description does not match, but it is in use so cannot be changed.")
# if the group doesn't exist, create it now
else:
'''no match found, create it'''
if not module.check_mode:
group = ec2.create_security_group(name, description, vpc_id=vpc_id)
# When a group is created, an egress_rule ALLOW ALL
# to 0.0.0.0/0 is added automatically but it's not
# reflected in the object returned by the AWS API
# call. We re-read the group to get an updated object.
# Amazon sometimes takes a couple of seconds to update the security
# group, so wait until it exists.
while len(ec2.get_all_security_groups(filters={ 'group_id': group.id, })) == 0:
time.sleep(0.1)
group = ec2.get_all_security_groups(group_ids=(group.id,))[0]
changed = True
else:
module.fail_json(msg="Unsupported state requested: %s" % state)
# create a lookup for all existing rules on the group
if group:
# Manage ingress rules
groupRules = {}
addRulesToLookup(group.rules, 'in', groupRules)
# Now, go through all provided rules and ensure they are there.
if rules:
for rule in rules:
group_id, ip, target_group_created = get_target_from_rule(module, ec2, rule, name, group, groups, vpc_id)
if target_group_created:
changed = True
if rule['proto'] in ('all', '-1', -1):
rule['proto'] = -1
rule['from_port'] = None
rule['to_port'] = None
# If rule already exists, don't later delete it
ruleId = "%s-%s-%s-%s-%s-%s" % ('in', rule['proto'], rule['from_port'], rule['to_port'], group_id, ip)
if ruleId in groupRules:
del groupRules[ruleId]
# Otherwise, add new rule
else:
grantGroup = None
if group_id:
grantGroup = groups[group_id]
if not module.check_mode:
group.authorize(rule['proto'], rule['from_port'], rule['to_port'], ip, grantGroup)
changed = True
# Finally, remove anything left in the groupRules -- these will be defunct rules
if purge_rules:
for rule in groupRules.itervalues():
for grant in rule.grants:
grantGroup = None
if grant.group_id:
grantGroup = groups[grant.group_id]
if not module.check_mode:
group.revoke(rule.ip_protocol, rule.from_port, rule.to_port, grant.cidr_ip, grantGroup)
changed = True
# Manage egress rules
groupRules = {}
addRulesToLookup(group.rules_egress, 'out', groupRules)
# Now, go through all provided rules and ensure they are there.
if rules_egress:
for rule in rules_egress:
group_id, ip, target_group_created = get_target_from_rule(module, ec2, rule, name, group, groups, vpc_id)
if target_group_created:
changed = True
if rule['proto'] in ('all', '-1', -1):
rule['proto'] = -1
rule['from_port'] = None
rule['to_port'] = None
# If rule already exists, don't later delete it
ruleId = "%s-%s-%s-%s-%s-%s" % ('out', rule['proto'], rule['from_port'], rule['to_port'], group_id, ip)
if ruleId in groupRules:
del groupRules[ruleId]
# Otherwise, add new rule
else:
grantGroup = None
if group_id:
grantGroup = groups[group_id].id
if not module.check_mode:
ec2.authorize_security_group_egress(
group_id=group.id,
ip_protocol=rule['proto'],
from_port=rule['from_port'],
to_port=rule['to_port'],
src_group_id=grantGroup,
cidr_ip=ip)
changed = True
elif vpc_id and not module.check_mode:
# when using a vpc, but no egress rules are specified,
# we add in a default allow all out rule, which was the
# default behavior before egress rules were added
default_egress_rule = 'out--1-None-None-None-0.0.0.0/0'
if default_egress_rule not in groupRules:
ec2.authorize_security_group_egress(
group_id=group.id,
ip_protocol=-1,
from_port=None,
to_port=None,
src_group_id=None,
cidr_ip='0.0.0.0/0'
)
changed = True
else:
# make sure the default egress rule is not removed
del groupRules[default_egress_rule]
# Finally, remove anything left in the groupRules -- these will be defunct rules
if purge_rules_egress:
for rule in groupRules.itervalues():
for grant in rule.grants:
grantGroup = None
if grant.group_id:
grantGroup = groups[grant.group_id].id
if not module.check_mode:
ec2.revoke_security_group_egress(
group_id=group.id,
ip_protocol=rule.ip_protocol,
from_port=rule.from_port,
to_port=rule.to_port,
src_group_id=grantGroup,
cidr_ip=grant.cidr_ip)
changed = True
if group:
module.exit_json(changed=changed, group_id=group.id)
else:
module.exit_json(changed=changed, group_id=None)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()

@ -1,238 +0,0 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
DOCUMENTATION = '''
---
module: ec2_key
version_added: "1.5"
short_description: maintain an ec2 key pair.
description:
- maintains ec2 key pairs. This module has a dependency on python-boto >= 2.5
options:
name:
description:
- Name of the key pair.
required: true
key_material:
description:
- Public key material.
required: false
region:
description:
- the EC2 region to use
required: false
default: null
aliases: []
state:
description:
- create or delete keypair
required: false
default: 'present'
aliases: []
wait:
description:
- Wait for the specified action to complete before returning.
required: false
default: false
aliases: []
version_added: "1.6"
wait_timeout:
description:
- How long before wait gives up, in seconds
required: false
default: 300
aliases: []
version_added: "1.6"
extends_documentation_fragment: aws
author: Vincent Viallet
'''
EXAMPLES = '''
# Note: None of these examples set aws_access_key, aws_secret_key, or region.
# It is assumed that their matching environment variables are set.
# Creates a new ec2 key pair named `example` if not present, returns generated
# private key
- name: example ec2 key
local_action:
module: ec2_key
name: example
# Creates a new ec2 key pair named `example` if not present using provided key
# material
- name: example2 ec2 key
local_action:
module: ec2_key
name: example2
key_material: 'ssh-rsa AAAAxyz...== me@example.com'
state: present
# Creates a new ec2 key pair named `example` if not present using provided key
# material
- name: example3 ec2 key
local_action:
module: ec2_key
name: example3
key_material: "{{ item }}"
with_file: /path/to/public_key.id_rsa.pub
# Removes ec2 key pair by name
- name: remove example key
local_action:
module: ec2_key
name: example
state: absent
'''
import sys
import time
import random
import string
try:
import boto.ec2
except ImportError:
print "failed=True msg='boto required for this module'"
sys.exit(1)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
name=dict(required=True),
key_material=dict(required=False),
state = dict(default='present', choices=['present', 'absent']),
wait = dict(type='bool', default=False),
wait_timeout = dict(default=300),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
name = module.params['name']
state = module.params.get('state')
key_material = module.params.get('key_material')
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
changed = False
ec2 = ec2_connect(module)
# find the key if present
key = ec2.get_key_pair(name)
# Ensure requested key is absent
if state == 'absent':
if key:
'''found a match, delete it'''
try:
key.delete()
if wait:
start = time.time()
action_complete = False
while (time.time() - start) < wait_timeout:
if not ec2.get_key_pair(name):
action_complete = True
break
time.sleep(1)
if not action_complete:
module.fail_json(msg="timed out while waiting for the key to be removed")
except Exception, e:
module.fail_json(msg="Unable to delete key pair '%s' - %s" % (key, e))
else:
key = None
changed = True
else:
'''no match found, no changes required'''
# Ensure requested key is present
elif state == 'present':
if key:
# existing key found
if key_material:
# EC2's fingerprints are non-trivial to generate, so push this key
# to a temporary name and make ec2 calculate the fingerprint for us.
#
# http://blog.jbrowne.com/?p=23
# https://forums.aws.amazon.com/thread.jspa?messageID=352828
# find an unused name
test = 'empty'
while test:
randomchars = [random.choice(string.ascii_letters + string.digits) for x in range(0,10)]
tmpkeyname = "ansible-" + ''.join(randomchars)
test = ec2.get_key_pair(tmpkeyname)
# create tmp key
tmpkey = ec2.import_key_pair(tmpkeyname, key_material)
# get tmp key fingerprint
tmpfingerprint = tmpkey.fingerprint
# delete tmp key
tmpkey.delete()
if key.fingerprint != tmpfingerprint:
if not module.check_mode:
key.delete()
key = ec2.import_key_pair(name, key_material)
if wait:
start = time.time()
action_complete = False
while (time.time() - start) < wait_timeout:
if ec2.get_key_pair(name):
action_complete = True
break
time.sleep(1)
if not action_complete:
module.fail_json(msg="timed out while waiting for the key to be re-created")
changed = True
# if the key doesn't exist, create it now
else:
'''no match found, create it'''
if not module.check_mode:
if key_material:
'''We are providing the key, need to import'''
key = ec2.import_key_pair(name, key_material)
else:
'''
No material provided, let AWS handle the key creation and
retrieve the private key
'''
key = ec2.create_key_pair(name)
if wait:
start = time.time()
action_complete = False
while (time.time() - start) < wait_timeout:
if ec2.get_key_pair(name):
action_complete = True
break
time.sleep(1)
if not action_complete:
module.fail_json(msg="timed out while waiting for the key to be created")
changed = True
if key:
data = {
'name': key.name,
'fingerprint': key.fingerprint
}
if key.material:
data.update({'private_key': key.material})
module.exit_json(changed=changed, key=data)
else:
module.exit_json(changed=changed, key=None)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()

@ -1,278 +0,0 @@
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
---
module: ec2_lc
short_description: Create or delete AWS Autoscaling Launch Configurations
description:
- Can create or delete AWS Autoscaling Launch Configurations
- Works with the ec2_asg module to manage Autoscaling Groups
notes:
- "Amazon ASG Autoscaling Launch Configurations are immutable once created, so modifying the configuration
after it is changed will not modify the launch configuration on AWS. You must create a new config and assign
it to the ASG instead."
version_added: "1.6"
author: Gareth Rushgrove
options:
state:
description:
- create or delete the launch configuration
required: true
choices: ['present', 'absent']
name:
description:
- Unique name for configuration
required: true
instance_type:
description:
- instance type to use for the instance
required: true
default: null
aliases: []
image_id:
description:
- The AMI unique identifier to be used for the group
required: false
key_name:
description:
- The SSH key name to be used for access to managed instances
required: false
security_groups:
description:
- A list of security groups to apply to the launched instances
required: false
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: false
aliases: ['aws_region', 'ec2_region']
volumes:
description:
- a list of volume dicts, each containing device name and optionally ephemeral id or snapshot id. Size and type (and number of iops for the io1 device type) must be specified for a new volume or a root volume, and may be passed for a snapshot volume. For any volume, a volume size less than 1 will be interpreted as a request not to create the volume.
required: false
default: null
aliases: []
user_data:
description:
- opaque blob of data which is made available to the ec2 instance
required: false
default: null
aliases: []
kernel_id:
description:
- Kernel id for the EC2 instance
required: false
default: null
aliases: []
spot_price:
description:
- The spot price you are bidding. Only applies for an autoscaling group with spot instances.
required: false
default: null
instance_monitoring:
description:
- whether instances in group are launched with detailed monitoring.
required: false
default: false
aliases: []
assign_public_ip:
description:
- Used for Auto Scaling groups that launch instances into an Amazon Virtual Private Cloud. Specifies whether to assign a public IP address to each instance launched in a Amazon VPC.
required: false
default: false
aliases: []
version_added: "1.8"
ramdisk_id:
description:
- A RAM disk id for the instances.
required: false
default: null
aliases: []
version_added: "1.8"
instance_profile_name:
description:
- The name or the Amazon Resource Name (ARN) of the instance profile associated with the IAM role for the instances.
required: false
default: null
aliases: []
version_added: "1.8"
ebs_optimized:
description:
- Specifies whether the instance is optimized for EBS I/O (true) or not (false).
required: false
default: false
aliases: []
version_added: "1.8"
extends_documentation_fragment: aws
"""
EXAMPLES = '''
- ec2_lc:
name: special
image_id: ami-XXX
key_name: default
security_groups: 'group,group2'
instance_type: t1.micro
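# A fuller sketch (illustrative names and values, not tested output): a
# config with detailed monitoring and an extra io1 EBS volume, exercising
# the volumes option described above.
- ec2_lc:
    name: app-lc
    image_id: ami-XXX
    key_name: default
    security_groups: 'group,group2'
    instance_type: m1.small
    instance_monitoring: yes
    volumes:
    - device_name: /dev/sdb
      volume_size: 10
      device_type: io1
      iops: 300
      delete_on_termination: true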
'''
import sys
import time
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
try:
from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping
import boto.ec2.autoscale
from boto.ec2.autoscale import LaunchConfiguration
from boto.exception import BotoServerError
except ImportError:
print "failed=True msg='boto required for this module'"
sys.exit(1)
def create_block_device(module, volume):
# Not aware of a way to determine this programmatically
# http://aws.amazon.com/about-aws/whats-new/2013/10/09/ebs-provisioned-iops-maximum-iops-gb-ratio-increased-to-30-1/
MAX_IOPS_TO_SIZE_RATIO = 30
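# The ratio above is defined but never enforced in this version; a guard
# would look like the following sketch (an assumption, not original behaviour):
# if volume.get('iops') and volume.get('volume_size') and \
#         int(volume['iops']) > MAX_IOPS_TO_SIZE_RATIO * int(volume['volume_size']):
#     module.fail_json(msg='iops cannot exceed %d times volume_size' % MAX_IOPS_TO_SIZE_RATIO)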
if 'snapshot' not in volume and 'ephemeral' not in volume:
if 'volume_size' not in volume:
module.fail_json(msg='Size must be specified when creating a new volume or modifying the root volume')
if 'snapshot' in volume:
if 'device_type' in volume and volume.get('device_type') == 'io1' and 'iops' not in volume:
module.fail_json(msg='io1 volumes must have an iops value set')
if 'ephemeral' in volume:
if 'snapshot' in volume:
module.fail_json(msg='Cannot set both ephemeral and snapshot')
return BlockDeviceType(snapshot_id=volume.get('snapshot'),
ephemeral_name=volume.get('ephemeral'),
size=volume.get('volume_size'),
volume_type=volume.get('device_type'),
delete_on_termination=volume.get('delete_on_termination', False),
iops=volume.get('iops'))
def create_launch_config(connection, module):
name = module.params.get('name')
image_id = module.params.get('image_id')
key_name = module.params.get('key_name')
security_groups = module.params['security_groups']
user_data = module.params.get('user_data')
volumes = module.params['volumes']
instance_type = module.params.get('instance_type')
spot_price = module.params.get('spot_price')
instance_monitoring = module.params.get('instance_monitoring')
assign_public_ip = module.params.get('assign_public_ip')
kernel_id = module.params.get('kernel_id')
ramdisk_id = module.params.get('ramdisk_id')
instance_profile_name = module.params.get('instance_profile_name')
ebs_optimized = module.params.get('ebs_optimized')
bdm = BlockDeviceMapping()
if volumes:
for volume in volumes:
if 'device_name' not in volume:
module.fail_json(msg='Device name must be set for volume')
# Minimum volume size is 1GB. We'll use volume size explicitly set to 0
# to be a signal not to create this volume
if 'volume_size' not in volume or int(volume['volume_size']) > 0:
bdm[volume['device_name']] = create_block_device(module, volume)
lc = LaunchConfiguration(
name=name,
image_id=image_id,
key_name=key_name,
security_groups=security_groups,
user_data=user_data,
block_device_mappings=[bdm],
instance_type=instance_type,
kernel_id=kernel_id,
spot_price=spot_price,
instance_monitoring=instance_monitoring,
associate_public_ip_address = assign_public_ip,
ramdisk_id=ramdisk_id,
instance_profile_name=instance_profile_name,
ebs_optimized=ebs_optimized,
)
launch_configs = connection.get_all_launch_configurations(names=[name])
changed = False
if not launch_configs:
try:
connection.create_launch_configuration(lc)
launch_configs = connection.get_all_launch_configurations(names=[name])
changed = True
except BotoServerError, e:
module.fail_json(msg=str(e))
result = launch_configs[0]
module.exit_json(changed=changed, name=result.name, created_time=str(result.created_time),
image_id=result.image_id, arn=result.launch_configuration_arn,
security_groups=result.security_groups, instance_type=instance_type)
def delete_launch_config(connection, module):
name = module.params.get('name')
launch_configs = connection.get_all_launch_configurations(names=[name])
if launch_configs:
launch_configs[0].delete()
module.exit_json(changed=True)
else:
module.exit_json(changed=False)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name=dict(required=True, type='str'),
image_id=dict(type='str'),
key_name=dict(type='str'),
security_groups=dict(type='list'),
user_data=dict(type='str'),
kernel_id=dict(type='str'),
volumes=dict(type='list'),
instance_type=dict(type='str'),
state=dict(default='present', choices=['present', 'absent']),
spot_price=dict(type='float'),
ramdisk_id=dict(type='str'),
instance_profile_name=dict(type='str'),
ebs_optimized=dict(default=False, type='bool'),
associate_public_ip_address=dict(type='bool'),
instance_monitoring=dict(default=False, type='bool'),
assign_public_ip=dict(default=False, type='bool')
)
)
module = AnsibleModule(argument_spec=argument_spec)
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
try:
connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg=str(e))
state = module.params.get('state')
if state == 'present':
create_launch_config(connection, module)
elif state == 'absent':
delete_launch_config(connection, module)
main()

@ -1,282 +0,0 @@
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
module: ec2_metric_alarm
short_description: "Create/update or delete AWS Cloudwatch 'metric alarms'"
description:
- Can create or delete AWS metric alarms
- Metrics you wish to alarm on must already exist
version_added: "1.6"
author: Zacharie Eakin
options:
state:
description:
- register or deregister the alarm
required: true
choices: ['present', 'absent']
name:
description:
- Unique name for the alarm
required: true
metric:
description:
- Name of the monitored metric (e.g. CPUUtilization)
- Metric must already exist
required: false
namespace:
description:
- Name of the appropriate namespace ('AWS/EC2', 'System/Linux', etc.), which determines the category it will appear under in cloudwatch
required: false
statistic:
description:
- Operation applied to the metric
- Works in conjunction with period and evaluation_periods to determine the comparison value
required: false
choices: ['SampleCount','Average','Sum','Minimum','Maximum']
comparison:
description:
- Determines how the threshold value is compared
required: false
choices: ['<=','<','>','>=']
threshold:
description:
- Sets the min/max bound for triggering the alarm
required: false
period:
description:
- The time (in seconds) between metric evaluations
required: false
evaluation_periods:
description:
- The number of consecutive periods over which the metric is evaluated before the alarm state is determined
required: false
unit:
description:
- The threshold's unit of measurement
required: false
choices: ['Seconds','Microseconds','Milliseconds','Bytes','Kilobytes','Megabytes','Gigabytes','Terabytes','Bits','Kilobits','Megabits','Gigabits','Terabits','Percent','Count','Bytes/Second','Kilobytes/Second','Megabytes/Second','Gigabytes/Second','Terabytes/Second','Bits/Second','Kilobits/Second','Megabits/Second','Gigabits/Second','Terabits/Second','Count/Second','None']
description:
description:
- A longer description of the alarm
required: false
dimensions:
description:
- Describes which metric dimensions the alarm applies to (e.g. a specific InstanceId)
required: false
alarm_actions:
description:
- A list of the names of action(s) to take when the alarm is in the 'alarm' status
required: false
insufficient_data_actions:
description:
- A list of the names of action(s) to take when the alarm is in the 'insufficient_data' status
required: false
ok_actions:
description:
- A list of the names of action(s) to take when the alarm is in the 'ok' status
required: false
extends_documentation_fragment: aws
"""
EXAMPLES = '''
- name: create alarm
ec2_metric_alarm:
state: present
region: ap-southeast-2
name: "cpu-low"
metric: "CPUUtilization"
namespace: "AWS/EC2"
statistic: Average
comparison: "<="
threshold: 5.0
period: 300
evaluation_periods: 3
unit: "Percent"
description: "This will alarm when a bamboo slave's cpu usage average is lower than 5% for 15 minutes "
dimensions: {'InstanceId':'i-XXX'}
alarm_actions: ["action1","action2"]
'''
import sys
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
try:
import boto.ec2.cloudwatch
from boto.ec2.cloudwatch import CloudWatchConnection, MetricAlarm
from boto.exception import BotoServerError
except ImportError:
print "failed=True msg='boto required for this module'"
sys.exit(1)
def create_metric_alarm(connection, module):
name = module.params.get('name')
metric = module.params.get('metric')
namespace = module.params.get('namespace')
statistic = module.params.get('statistic')
comparison = module.params.get('comparison')
threshold = module.params.get('threshold')
period = module.params.get('period')
evaluation_periods = module.params.get('evaluation_periods')
unit = module.params.get('unit')
description = module.params.get('description')
dimensions = module.params.get('dimensions')
alarm_actions = module.params.get('alarm_actions')
insufficient_data_actions = module.params.get('insufficient_data_actions')
ok_actions = module.params.get('ok_actions')
alarms = connection.describe_alarms(alarm_names=[name])
if not alarms:
alm = MetricAlarm(
name=name,
metric=metric,
namespace=namespace,
statistic=statistic,
comparison=comparison,
threshold=threshold,
period=period,
evaluation_periods=evaluation_periods,
unit=unit,
description=description,
dimensions=dimensions,
alarm_actions=alarm_actions,
insufficient_data_actions=insufficient_data_actions,
ok_actions=ok_actions
)
try:
connection.create_alarm(alm)
changed = True
alarms = connection.describe_alarms(alarm_names=[name])
except BotoServerError, e:
module.fail_json(msg=str(e))
else:
alarm = alarms[0]
changed = False
for attr in ('comparison','metric','namespace','statistic','threshold','period','evaluation_periods','unit','description'):
if getattr(alarm, attr) != module.params.get(attr):
changed = True
setattr(alarm, attr, module.params.get(attr))
#this is to deal with a current bug where you cannot assign '<=>' to the comparator when modifying an existing alarm
comparison = alarm.comparison
comparisons = {'<=' : 'LessThanOrEqualToThreshold', '<' : 'LessThanThreshold', '>=' : 'GreaterThanOrEqualToThreshold', '>' : 'GreaterThanThreshold'}
alarm.comparison = comparisons[comparison]
dim1 = module.params.get('dimensions') or {}
dim2 = alarm.dimensions
for keys in dim1:
if not isinstance(dim1[keys], list):
dim1[keys] = [dim1[keys]]
if dim1[keys] != dim2.get(keys):
changed = True
setattr(alarm, 'dimensions', dim1)
for attr in ('alarm_actions','insufficient_data_actions','ok_actions'):
action = module.params.get(attr) or []
if getattr(alarm, attr) != action:
changed = True
setattr(alarm, attr, action)
try:
if changed:
connection.create_alarm(alarm)
except BotoServerError, e:
module.fail_json(msg=str(e))
result = alarms[0]
module.exit_json(changed=changed, name=result.name,
actions_enabled=result.actions_enabled,
alarm_actions=result.alarm_actions,
alarm_arn=result.alarm_arn,
comparison=result.comparison,
description=result.description,
dimensions=result.dimensions,
evaluation_periods=result.evaluation_periods,
insufficient_data_actions=result.insufficient_data_actions,
last_updated=result.last_updated,
metric=result.metric,
namespace=result.namespace,
ok_actions=result.ok_actions,
period=result.period,
state_reason=result.state_reason,
state_value=result.state_value,
statistic=result.statistic,
threshold=result.threshold,
unit=result.unit)
def delete_metric_alarm(connection, module):
name = module.params.get('name')
alarms = connection.describe_alarms(alarm_names=[name])
if alarms:
try:
connection.delete_alarms([name])
module.exit_json(changed=True)
except BotoServerError, e:
module.fail_json(msg=str(e))
else:
module.exit_json(changed=False)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name=dict(required=True, type='str'),
metric=dict(type='str'),
namespace=dict(type='str'),
statistic=dict(type='str', choices=['SampleCount', 'Average', 'Sum', 'Minimum', 'Maximum']),
comparison=dict(type='str', choices=['<=', '<', '>', '>=']),
threshold=dict(type='float'),
period=dict(type='int'),
unit=dict(type='str', choices=['Seconds', 'Microseconds', 'Milliseconds', 'Bytes', 'Kilobytes', 'Megabytes', 'Gigabytes', 'Terabytes', 'Bits', 'Kilobits', 'Megabits', 'Gigabits', 'Terabits', 'Percent', 'Count', 'Bytes/Second', 'Kilobytes/Second', 'Megabytes/Second', 'Gigabytes/Second', 'Terabytes/Second', 'Bits/Second', 'Kilobits/Second', 'Megabits/Second', 'Gigabits/Second', 'Terabits/Second', 'Count/Second', 'None']),
evaluation_periods=dict(type='int'),
description=dict(type='str'),
dimensions=dict(type='dict'),
alarm_actions=dict(type='list'),
insufficient_data_actions=dict(type='list'),
ok_actions=dict(type='list'),
state=dict(default='present', choices=['present', 'absent']),
region=dict(aliases=['aws_region', 'ec2_region'], choices=AWS_REGIONS),
)
)
module = AnsibleModule(argument_spec=argument_spec)
state = module.params.get('state')
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
try:
connection = connect_to_aws(boto.ec2.cloudwatch, region, **aws_connect_params)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg=str(e))
if state == 'present':
create_metric_alarm(connection, module)
elif state == 'absent':
delete_metric_alarm(connection, module)
main()

@ -1,177 +0,0 @@
#!/usr/bin/python
DOCUMENTATION = """
module: ec2_scaling_policy
short_description: Create or delete AWS scaling policies for Autoscaling groups
description:
- Can create or delete scaling policies for autoscaling groups
- Referenced autoscaling groups must already exist
version_added: "1.6"
author: Zacharie Eakin
options:
state:
description:
- register or deregister the policy
required: true
choices: ['present', 'absent']
name:
description:
- Unique name for the scaling policy
required: true
asg_name:
description:
- Name of the associated autoscaling group
required: true
adjustment_type:
description:
- The type of change in capacity of the autoscaling group
required: false
choices: ['ChangeInCapacity','ExactCapacity','PercentChangeInCapacity']
scaling_adjustment:
description:
- The amount by which the autoscaling group is adjusted by the policy
required: false
min_adjustment_step:
description:
- Minimum amount of adjustment when policy is triggered
required: false
cooldown:
description:
- The minimum period of time between successive autoscaling actions
required: false
extends_documentation_fragment: aws
"""
EXAMPLES = '''
- ec2_scaling_policy:
state: present
region: US-XXX
name: "scaledown-policy"
adjustment_type: "ChangeInCapacity"
asg_name: "slave-pool"
scaling_adjustment: -1
min_adjustment_step: 1
cooldown: 300
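# Sketch of a percentage-based policy (illustrative values): with
# adjustment_type PercentChangeInCapacity, min_adjustment_step is the
# attribute the module treats as a special case on updates.
- ec2_scaling_policy:
    state: present
    region: US-XXX
    name: "scaleup-policy"
    adjustment_type: "PercentChangeInCapacity"
    asg_name: "slave-pool"
    scaling_adjustment: 50
    min_adjustment_step: 2
    cooldown: 300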
'''
import sys
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
try:
import boto.ec2.autoscale
from boto.ec2.autoscale import ScalingPolicy
from boto.exception import BotoServerError
except ImportError:
print "failed=True msg='boto required for this module'"
sys.exit(1)
def create_scaling_policy(connection, module):
sp_name = module.params.get('name')
adjustment_type = module.params.get('adjustment_type')
asg_name = module.params.get('asg_name')
scaling_adjustment = module.params.get('scaling_adjustment')
min_adjustment_step = module.params.get('min_adjustment_step')
cooldown = module.params.get('cooldown')
scalingPolicies = connection.get_all_policies(as_group=asg_name,policy_names=[sp_name])
if not scalingPolicies:
sp = ScalingPolicy(
name=sp_name,
adjustment_type=adjustment_type,
as_name=asg_name,
scaling_adjustment=scaling_adjustment,
min_adjustment_step=min_adjustment_step,
cooldown=cooldown)
try:
connection.create_scaling_policy(sp)
policy = connection.get_all_policies(policy_names=[sp_name])[0]
module.exit_json(changed=True, name=policy.name, arn=policy.policy_arn, as_name=policy.as_name, scaling_adjustment=policy.scaling_adjustment, cooldown=policy.cooldown, adjustment_type=policy.adjustment_type, min_adjustment_step=policy.min_adjustment_step)
except BotoServerError, e:
module.fail_json(msg=str(e))
else:
policy = scalingPolicies[0]
changed = False
# min_adjustment_step attribute is only relevant if the adjustment_type
# is set to percentage change in capacity, so it is a special case
if getattr(policy, 'adjustment_type') == 'PercentChangeInCapacity':
if getattr(policy, 'min_adjustment_step') != module.params.get('min_adjustment_step'):
changed = True
# set the min adjustment step in case the user decided to change their
# adjustment type to percentage
setattr(policy, 'min_adjustment_step', module.params.get('min_adjustment_step'))
# check the remaining attributes
for attr in ('adjustment_type','scaling_adjustment','cooldown'):
if getattr(policy, attr) != module.params.get(attr):
changed = True
setattr(policy, attr, module.params.get(attr))
try:
if changed:
connection.create_scaling_policy(policy)
policy = connection.get_all_policies(policy_names=[sp_name])[0]
module.exit_json(changed=changed, name=policy.name, arn=policy.policy_arn, as_name=policy.as_name, scaling_adjustment=policy.scaling_adjustment, cooldown=policy.cooldown, adjustment_type=policy.adjustment_type, min_adjustment_step=policy.min_adjustment_step)
except BotoServerError, e:
module.fail_json(msg=str(e))
def delete_scaling_policy(connection, module):
sp_name = module.params.get('name')
asg_name = module.params.get('asg_name')
scalingPolicies = connection.get_all_policies(as_group=asg_name,policy_names=[sp_name])
if scalingPolicies:
try:
connection.delete_policy(sp_name, asg_name)
module.exit_json(changed=True)
except BotoServerError, e:
module.exit_json(changed=False, msg=str(e))
else:
module.exit_json(changed=False)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name = dict(required=True, type='str'),
adjustment_type = dict(type='str', choices=['ChangeInCapacity','ExactCapacity','PercentChangeInCapacity']),
asg_name = dict(required=True, type='str'),
scaling_adjustment = dict(type='int'),
min_adjustment_step = dict(type='int'),
cooldown = dict(type='int'),
region = dict(aliases=['aws_region', 'ec2_region'], choices=AWS_REGIONS),
state=dict(default='present', choices=['present', 'absent']),
)
)
module = AnsibleModule(argument_spec=argument_spec)
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
state = module.params.get('state')
try:
connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params)
if not connection:
module.fail_json(msg="failed to connect to AWS for the given region: %s" % str(region))
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg = str(e))
if state == 'present':
create_scaling_policy(connection, module)
elif state == 'absent':
delete_scaling_policy(connection, module)
main()

@ -1,151 +0,0 @@
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_snapshot
short_description: creates a snapshot from an existing volume
description:
- creates an EC2 snapshot from an existing EBS volume
version_added: "1.5"
options:
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: false
aliases: ['aws_region', 'ec2_region']
volume_id:
description:
- volume from which to take the snapshot
required: false
description:
description:
- description to be applied to the snapshot
required: false
instance_id:
description:
- instance that has the required volume to snapshot mounted
required: false
device_name:
description:
- device name of a mounted volume to be snapshotted
required: false
snapshot_tags:
description:
- a hash/dictionary of tags to add to the snapshot
required: false
version_added: "1.6"
author: Will Thames
extends_documentation_fragment: aws
'''
EXAMPLES = '''
# Simple snapshot of volume using volume_id
- local_action:
module: ec2_snapshot
volume_id: vol-abcdef12
description: snapshot of /data from DB123 taken 2013/11/28 12:18:32
# Snapshot of volume mounted on device_name attached to instance_id
- local_action:
module: ec2_snapshot
instance_id: i-12345678
device_name: /dev/sdb1
description: snapshot of /data from DB123 taken 2013/11/28 12:18:32
# Snapshot of volume with tagging
- local_action:
module: ec2_snapshot
instance_id: i-12345678
device_name: /dev/sdb1
snapshot_tags:
frequency: hourly
source: /data
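# Sketch using the wait parameters accepted by the module's argument spec
# (values illustrative): block until the snapshot completes or ten minutes pass.
- local_action:
    module: ec2_snapshot
    volume_id: vol-abcdef12
    description: nightly snapshot of /data
    wait: yes
    wait_timeout: 600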
'''
import sys
import time
try:
import boto.ec2
except ImportError:
print "failed=True msg='boto required for this module'"
sys.exit(1)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
volume_id = dict(),
description = dict(),
instance_id = dict(),
device_name = dict(),
wait = dict(type='bool', default=True),
wait_timeout = dict(default=0),
snapshot_tags = dict(type='dict', default=dict()),
)
)
module = AnsibleModule(argument_spec=argument_spec)
volume_id = module.params.get('volume_id')
description = module.params.get('description')
instance_id = module.params.get('instance_id')
device_name = module.params.get('device_name')
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
snapshot_tags = module.params.get('snapshot_tags')
if (not volume_id and not instance_id) or (volume_id and instance_id):
module.fail_json(msg='One and only one of volume_id or instance_id must be specified')
if (instance_id and not device_name) or (device_name and not instance_id):
module.fail_json(msg='Instance ID and device name must both be specified')
ec2 = ec2_connect(module)
if instance_id:
try:
volumes = ec2.get_all_volumes(filters={'attachment.instance-id': instance_id, 'attachment.device': device_name})
if not volumes:
module.fail_json(msg="Could not find volume with name %s attached to instance %s" % (device_name, instance_id))
volume_id = volumes[0].id
except boto.exception.BotoServerError, e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
try:
snapshot = ec2.create_snapshot(volume_id, description=description)
time_waited = 0
if wait:
snapshot.update()
while snapshot.status != 'completed':
time.sleep(3)
snapshot.update()
time_waited += 3
if wait_timeout and time_waited > wait_timeout:
module.fail_json(msg='Timed out while creating snapshot.')
for k, v in snapshot_tags.items():
snapshot.add_tag(k, v)
except boto.exception.BotoServerError, e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
module.exit_json(changed=True, snapshot_id=snapshot.id, volume_id=snapshot.volume_id,
volume_size=snapshot.volume_size, tags=snapshot.tags.copy())
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()

@ -1,152 +0,0 @@
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_tag
short_description: create and remove tag(s) on ec2 resources.
description:
- Creates, removes and lists tags from any EC2 resource. The resource is referenced by its resource id (e.g. an instance being i-XXXXXXX). It is designed to be used with complex args (tags), see the examples. This module has a dependency on python-boto.
version_added: "1.3"
options:
resource:
description:
- The EC2 resource id.
required: true
default: null
aliases: []
state:
description:
- Whether the tags should be present or absent on the resource. Use list to interrogate the tags of an instance.
required: false
default: present
choices: ['present', 'absent', 'list']
aliases: []
region:
description:
- region in which the resource exists.
required: false
default: null
aliases: ['aws_region', 'ec2_region']
author: Lester Wade
extends_documentation_fragment: aws
'''
EXAMPLES = '''
# Basic example of adding tag(s)
tasks:
- name: tag a resource
local_action: ec2_tag resource=vol-XXXXXX region=eu-west-1 state=present
args:
tags:
Name: ubervol
env: prod
# Playbook example of adding tag(s) to spawned instances
tasks:
- name: launch some instances
local_action: ec2 keypair={{ keypair }} group={{ security_group }} instance_type={{ instance_type }} image={{ image_id }} wait=true region=eu-west-1
register: ec2
- name: tag my launched instances
local_action: ec2_tag resource={{ item.id }} region=eu-west-1 state=present
with_items: ec2.instances
args:
tags:
Name: webserver
env: prod
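# Listing and removing tags (sketch; resource ids illustrative)
tasks:
- name: list the tags on a resource
  local_action: ec2_tag resource=vol-XXXXXX region=eu-west-1 state=list
- name: remove a tag
  local_action: ec2_tag resource=vol-XXXXXX region=eu-west-1 state=absent
  args:
    tags:
      env: prod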
'''
# Note: this module needs to be made idempotent. Possible solution is to use resource tags with the volumes.
# if state=present and it doesn't exist, create, tag and attach.
# Check for state by looking for volume attachment with tag (and against block device mapping?).
# Would personally like to revisit this in May when Eucalyptus also has tagging support (3.3).
import sys
import time
try:
import boto.ec2
except ImportError:
print "failed=True msg='boto required for this module'"
sys.exit(1)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
resource = dict(required=True),
tags = dict(),
state = dict(default='present', choices=['present', 'absent', 'list']),
)
)
module = AnsibleModule(argument_spec=argument_spec)
resource = module.params.get('resource')
tags = module.params.get('tags')
state = module.params.get('state')
ec2 = ec2_connect(module)
# We need a comparison here so that we can accurately report back changed status.
# Need to expand the gettags return format and compare with "tags" and then tag or detag as appropriate.
filters = {'resource-id' : resource}
gettags = ec2.get_all_tags(filters=filters)
dictadd = {}
dictremove = {}
baddict = {}
tagdict = {}
for tag in gettags:
tagdict[tag.name] = tag.value
if state == 'present':
if not tags:
module.fail_json(msg="tags argument is required when state is present")
if set(tags.items()).issubset(set(tagdict.items())):
module.exit_json(msg="Tags already exists in %s." %resource, changed=False)
else:
for (key, value) in set(tags.items()):
if (key, value) not in set(tagdict.items()):
dictadd[key] = value
tagger = ec2.create_tags(resource, dictadd)
gettags = ec2.get_all_tags(filters=filters)
module.exit_json(msg="Tags %s created for resource %s." % (dictadd,resource), changed=True)
if state == 'absent':
if not tags:
module.fail_json(msg="tags argument is required when state is absent")
for (key, value) in set(tags.items()):
if (key, value) not in set(tagdict.items()):
baddict[key] = value
if set(baddict) == set(tags):
module.exit_json(msg="Nothing to remove here. Move along.", changed=False)
for (key, value) in set(tags.items()):
if (key, value) in set(tagdict.items()):
dictremove[key] = value
tagger = ec2.delete_tags(resource, dictremove)
gettags = ec2.get_all_tags(filters=filters)
module.exit_json(msg="Tags %s removed for resource %s." % (dictremove,resource), changed=True)
if state == 'list':
module.exit_json(changed=False, tags=tagdict)
sys.exit(0)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()

@ -1,434 +0,0 @@
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_vol
short_description: create and attach a volume, return volume id and device map
description:
- creates an EBS volume and optionally attaches it to an instance. If both an instance ID and a device name are given and the instance has a device at the device name, then no volume is created and no attachment is made. This module has a dependency on python-boto.
version_added: "1.1"
options:
instance:
description:
- instance ID if you wish to attach the volume.
required: false
default: null
aliases: []
name:
description:
- volume Name tag if you wish to attach an existing volume (requires instance)
required: false
default: null
aliases: []
version_added: "1.6"
id:
description:
- volume id if you wish to attach an existing volume (requires instance) or remove an existing volume
required: false
default: null
aliases: []
version_added: "1.6"
volume_size:
description:
- size of volume (in GB) to create.
required: false
default: null
aliases: []
iops:
description:
- the provisioned IOPs you want to associate with this volume (integer).
required: false
default: 100
aliases: []
version_added: "1.3"
encrypted:
description:
- Enable encryption at rest for this volume.
default: false
version_added: "1.8"
device_name:
description:
- device id to override device mapping. Assumes /dev/sdf for Linux/UNIX and /dev/xvdf for Windows.
required: false
default: null
aliases: []
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: false
default: null
aliases: ['aws_region', 'ec2_region']
zone:
description:
- zone in which to create the volume, if unset uses the zone the instance is in (if set)
required: false
default: null
aliases: ['aws_zone', 'ec2_zone']
snapshot:
description:
- snapshot ID on which to base the volume
required: false
default: null
version_added: "1.5"
validate_certs:
description:
- When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
required: false
default: "yes"
choices: ["yes", "no"]
aliases: []
version_added: "1.5"
state:
description:
- whether to ensure the volume is present or absent, or to list existing volumes (The C(list) option was added in version 1.8).
required: false
default: present
choices: ['absent', 'present', 'list']
version_added: "1.6"
author: Lester Wade
extends_documentation_fragment: aws
'''
EXAMPLES = '''
# Simple attachment action
- local_action:
module: ec2_vol
instance: XXXXXX
volume_size: 5
device_name: sdd
# Example using custom iops params
- local_action:
module: ec2_vol
instance: XXXXXX
volume_size: 5
iops: 200
device_name: sdd
# Example using snapshot id
- local_action:
module: ec2_vol
instance: XXXXXX
snapshot: "{{ snapshot }}"
# Playbook example combined with instance launch
- local_action:
module: ec2
keypair: "{{ keypair }}"
image: "{{ image }}"
wait: yes
count: 3
register: ec2
- local_action:
module: ec2_vol
instance: "{{ item.id }} "
volume_size: 5
with_items: ec2.instances
register: ec2_vol
# Example: Launch an instance and then add a volume if not already present
# * Nothing will happen if the volume is already attached.
# * Volume must exist in the same zone.
- local_action:
module: ec2
keypair: "{{ keypair }}"
image: "{{ image }}"
zone: YYYYYY
id: my_instance
wait: yes
count: 1
register: ec2
- local_action:
module: ec2_vol
instance: "{{ item.id }}"
name: my_existing_volume_Name_tag
device_name: /dev/xvdf
with_items: ec2.instances
register: ec2_vol
# Remove a volume
- local_action:
module: ec2_vol
id: vol-XXXXXXXX
state: absent
# List volumes for an instance
- local_action:
module: ec2_vol
instance: i-XXXXXX
state: list
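# Create a new encrypted volume (sketch; instance id illustrative, requires
# boto >= 2.29.0 as checked by the module)
- local_action:
    module: ec2_vol
    instance: XXXXXX
    volume_size: 5
    encrypted: yes
    device_name: sdd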
'''
# Note: this module needs to be made idempotent. Possible solution is to use resource tags with the volumes.
# if state=present and it doesn't exist, create, tag and attach.
# Check for state by looking for volume attachment with tag (and against block device mapping?).
# Would personally like to revisit this in May when Eucalyptus also has tagging support (3.3).
import sys
import time
from distutils.version import LooseVersion
try:
import boto.ec2
except ImportError:
print "failed=True msg='boto required for this module'"
sys.exit(1)
def get_volume(module, ec2):
name = module.params.get('name')
id = module.params.get('id')
zone = module.params.get('zone')
filters = {}
volume_ids = None
if zone:
filters['availability_zone'] = zone
if name:
filters = {'tag:Name': name}
if id:
volume_ids = [id]
try:
vols = ec2.get_all_volumes(volume_ids=volume_ids, filters=filters)
except boto.exception.BotoServerError, e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
if not vols:
module.fail_json(msg="Could not find volume in zone (if specified): %s" % name or id)
if len(vols) > 1:
module.fail_json(msg="Found more than one volume in zone (if specified) with name: %s" % name)
return vols[0]
def get_volumes(module, ec2):
instance = module.params.get('instance')
if not instance:
module.fail_json(msg = "Instance must be specified to get volumes")
try:
vols = ec2.get_all_volumes(filters={'attachment.instance-id': instance})
except boto.exception.BotoServerError, e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
return vols
def delete_volume(module, ec2):
vol = get_volume(module, ec2)
if not vol:
module.exit_json(changed=False)
else:
if vol.attachment_state() is not None:
adata = vol.attach_data
module.fail_json(msg="Volume %s is attached to an instance %s." % (vol.id, adata.instance_id))
ec2.delete_volume(vol.id)
module.exit_json(changed=True)
def boto_supports_volume_encryption():
"""
Check if Boto library supports encryption of EBS volumes (added in 2.29.0)
Returns:
True if the installed boto version is at least 2.29.0, else False
"""
return hasattr(boto, 'Version') and LooseVersion(boto.Version) >= LooseVersion('2.29.0')
def create_volume(module, ec2, zone):
name = module.params.get('name')
id = module.params.get('id')
instance = module.params.get('instance')
iops = module.params.get('iops')
encrypted = module.params.get('encrypted')
volume_size = module.params.get('volume_size')
snapshot = module.params.get('snapshot')
# If custom iops is defined we use volume_type "io1" rather than the default of "standard"
if iops:
volume_type = 'io1'
else:
volume_type = 'standard'
# If no instance supplied, try volume creation based on module parameters.
if name or id:
if not instance:
module.fail_json(msg = "If name or id is specified, instance must also be specified")
if iops or volume_size:
module.fail_json(msg = "Parameters are not compatible: [id or name] and [iops or volume_size]")
volume = get_volume(module, ec2)
if volume.attachment_state() is not None:
adata = volume.attach_data
if adata.instance_id != instance:
module.fail_json(msg = "Volume %s is already attached to another instance: %s"
% (name or id, adata.instance_id))
else:
module.exit_json(msg="Volume %s is already mapped on instance %s: %s" %
(name or id, adata.instance_id, adata.device),
volume_id=id,
device=adata.device,
changed=False)
else:
try:
if boto_supports_volume_encryption():
volume = ec2.create_volume(volume_size, zone, snapshot, volume_type, iops, encrypted)
else:
volume = ec2.create_volume(volume_size, zone, snapshot, volume_type, iops)
while volume.status != 'available':
time.sleep(3)
volume.update()
except boto.exception.BotoServerError, e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
return volume
def attach_volume(module, ec2, volume, instance):
device_name = module.params.get('device_name')
if device_name and instance:
try:
attach = volume.attach(instance.id, device_name)
while volume.attachment_state() != 'attached':
time.sleep(3)
volume.update()
except boto.exception.BotoServerError, e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
# If device_name isn't set, make a choice based on best practices here:
# http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html
# In future this needs to be more dynamic but combining block device mapping best practices
# (bounds for devices, as above) with instance.block_device_mapping data would be tricky. For me ;)
# Use password data attribute to tell whether the instance is Windows or Linux
if device_name is None and instance:
try:
if not ec2.get_password_data(instance.id):
device_name = '/dev/sdf'
attach = volume.attach(instance.id, device_name)
while volume.attachment_state() != 'attached':
time.sleep(3)
volume.update()
else:
device_name = '/dev/xvdf'
attach = volume.attach(instance.id, device_name)
while volume.attachment_state() != 'attached':
time.sleep(3)
volume.update()
except boto.exception.BotoServerError, e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
instance = dict(),
id = dict(),
name = dict(),
volume_size = dict(),
iops = dict(),
encrypted = dict(),
device_name = dict(),
zone = dict(aliases=['availability_zone', 'aws_zone', 'ec2_zone']),
snapshot = dict(),
state = dict(choices=['absent', 'present', 'list'], default='present')
)
)
module = AnsibleModule(argument_spec=argument_spec)
id = module.params.get('id')
name = module.params.get('name')
instance = module.params.get('instance')
volume_size = module.params.get('volume_size')
iops = module.params.get('iops')
encrypted = module.params.get('encrypted')
device_name = module.params.get('device_name')
zone = module.params.get('zone')
snapshot = module.params.get('snapshot')
state = module.params.get('state')
ec2 = ec2_connect(module)
if state == 'list':
returned_volumes = []
vols = get_volumes(module, ec2)
for v in vols:
attachment = v.attach_data
returned_volumes.append({
'create_time': v.create_time,
'id': v.id,
'iops': v.iops,
'size': v.size,
'snapshot_id': v.snapshot_id,
'status': v.status,
'type': v.type,
'zone': v.zone,
'attachment_set': {
'attach_time': attachment.attach_time,
'device': attachment.device,
'status': attachment.status
}
})
module.exit_json(changed=False, volumes=returned_volumes)
if id and name:
module.fail_json(msg="Both id and name cannot be specified")
if encrypted and not boto_supports_volume_encryption():
module.fail_json(msg="You must use boto >= v2.29.0 to use encrypted volumes")
# Here we need to get the zone info for the instance. This covers situation where
# instance is specified but zone isn't.
# Useful for playbooks chaining instance launch with volume create + attach and where the
# zone doesn't matter to the user.
if instance:
reservation = ec2.get_all_instances(instance_ids=instance)
inst = reservation[0].instances[0]
zone = inst.placement
# Check if there is a volume already mounted there.
if device_name:
if device_name in inst.block_device_mapping:
module.exit_json(msg="Volume mapping for %s already exists on instance %s" % (device_name, instance),
volume_id=inst.block_device_mapping[device_name].volume_id,
device=device_name,
changed=False)
# Delaying the checks until after the instance check allows us to get volume ids for existing volumes
# without needing to pass an unused volume_size
if not volume_size and not (id or name):
module.fail_json(msg="You must specify an existing volume with id or name or a volume_size")
if volume_size and (id or name):
module.fail_json(msg="Cannot specify volume_size and either one of name or id")
if state == 'absent':
delete_volume(module, ec2)
if state == 'present':
volume = create_volume(module, ec2, zone)
if instance:
attach_volume(module, ec2, volume, inst)
module.exit_json(volume_id=volume.id, device=device_name)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()

@ -1,626 +0,0 @@
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_vpc
short_description: configure AWS virtual private clouds
description:
- Create or terminates AWS virtual private clouds. This module has a dependency on python-boto.
version_added: "1.4"
options:
cidr_block:
description:
- "The cidr block representing the VPC, e.g. 10.0.0.0/16"
required: false, unless state=present
instance_tenancy:
description:
- "The supported tenancy options for instances launched into the VPC."
required: false
default: "default"
choices: [ "default", "dedicated" ]
dns_support:
description:
- toggles the "Enable DNS resolution" flag
required: false
default: "yes"
choices: [ "yes", "no" ]
dns_hostnames:
description:
- toggles the "Enable DNS hostname support for instances" flag
required: false
default: "yes"
choices: [ "yes", "no" ]
subnets:
description:
- 'A dictionary array of subnets to add of the form: { cidr: ..., az: ..., resource_tags: ... }. az, the desired availability zone of the subnet, is optional. Tags (i.e. resource_tags) are also optional and use dictionary form: { "Environment":"Dev", "Tier":"Web", ... }. All VPC subnets not in this list will be removed. As of 1.8, if the subnets parameter is not specified, no existing subnets will be modified.'
required: false
default: null
aliases: []
vpc_id:
description:
- A VPC id to terminate when state=absent
required: false
default: null
aliases: []
resource_tags:
description:
- 'A dictionary array of resource tags of the form: { tag1: value1, tag2: value2 }. Tags in this list are used in conjunction with the CIDR block to uniquely identify a VPC in lieu of vpc_id. Therefore, if the CIDR/tag combination does not exist, a new VPC will be created. VPC tags not on this list will be ignored. Prior to 1.7, specifying a resource tag was optional.'
required: true
default: null
aliases: []
version_added: "1.6"
internet_gateway:
description:
- Toggle whether there should be an Internet gateway attached to the VPC
required: false
default: "no"
choices: [ "yes", "no" ]
aliases: []
route_tables:
description:
- 'A dictionary array of route tables to add of the form: { subnets: [172.22.2.0/24, 172.22.3.0/24,], routes: [{ dest: 0.0.0.0/0, gw: igw},] }. Where the subnets list is those subnets the route table should be associated with, and the routes list is a list of routes to be in the table. The special keyword igw for gw specifies that the route should go through the internet gateway attached to the VPC. gw also accepts instance-ids in addition to igw. This module is currently unable to affect the "main" route table due to some limitations in boto, so you must explicitly define the associated subnets or they will be attached to the main table implicitly. As of 1.8, if the route_tables parameter is not specified, no existing routes will be modified.'
required: false
default: null
aliases: []
wait:
description:
- wait for the VPC to be in state 'available' before returning
required: false
default: "no"
choices: [ "yes", "no" ]
aliases: []
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 300
aliases: []
state:
description:
- Create or terminate the VPC
required: true
default: present
aliases: []
region:
description:
- region in which the resource exists.
required: false
default: null
aliases: ['aws_region', 'ec2_region']
aws_secret_key:
description:
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
required: false
default: None
aliases: ['ec2_secret_key', 'secret_key' ]
aws_access_key:
description:
- AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
required: false
default: None
aliases: ['ec2_access_key', 'access_key' ]
validate_certs:
description:
- When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
required: false
default: "yes"
choices: ["yes", "no"]
aliases: []
version_added: "1.5"
requirements: [ "boto" ]
author: Carson Gee
'''
EXAMPLES = '''
# Note: None of these examples set aws_access_key, aws_secret_key, or region.
# It is assumed that their matching environment variables are set.
# Basic creation example:
local_action:
module: ec2_vpc
state: present
cidr_block: 172.23.0.0/16
resource_tags: { "Environment":"Development" }
region: us-west-2
# Full creation example with subnets and optional availability zones.
# The absence or presence of subnets deletes or creates them respectively.
local_action:
module: ec2_vpc
state: present
cidr_block: 172.22.0.0/16
resource_tags: { "Environment":"Development" }
subnets:
- cidr: 172.22.1.0/24
az: us-west-2c
resource_tags: { "Environment":"Dev", "Tier" : "Web" }
- cidr: 172.22.2.0/24
az: us-west-2b
resource_tags: { "Environment":"Dev", "Tier" : "App" }
- cidr: 172.22.3.0/24
az: us-west-2a
resource_tags: { "Environment":"Dev", "Tier" : "DB" }
internet_gateway: True
route_tables:
- subnets:
- 172.22.2.0/24
- 172.22.3.0/24
routes:
- dest: 0.0.0.0/0
gw: igw
- subnets:
- 172.22.1.0/24
routes:
- dest: 0.0.0.0/0
gw: igw
region: us-west-2
register: vpc
# Removal of a VPC by id
local_action:
module: ec2_vpc
state: absent
vpc_id: vpc-aaaaaaa
region: us-west-2
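# Routing through an instance instead of the internet gateway (sketch; ids
# illustrative). gw accepts instance ids such as a NAT instance.
local_action:
  module: ec2_vpc
  state: present
  cidr_block: 172.22.0.0/16
  resource_tags: { "Environment":"Development" }
  route_tables:
    - subnets:
        - 172.22.1.0/24
      routes:
        - dest: 0.0.0.0/0
          gw: i-XXXXXX
  region: us-west-2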
# If you have added elements not managed by this module, e.g. instances or NATs,
# the delete will fail until those dependencies are removed.
'''
import sys
import time
try:
import boto.ec2
import boto.vpc
from boto.exception import EC2ResponseError
except ImportError:
print "failed=True msg='boto required for this module'"
sys.exit(1)
def get_vpc_info(vpc):
"""
Retrieves vpc information from an instance
ID and returns it as a dictionary
"""
return({
'id': vpc.id,
'cidr_block': vpc.cidr_block,
'dhcp_options_id': vpc.dhcp_options_id,
'region': vpc.region.name,
'state': vpc.state,
})
def find_vpc(module, vpc_conn, vpc_id=None, cidr=None):
"""
Finds a VPC that matches a specific id or cidr + tags
module : AnsibleModule object
vpc_conn: authenticated VPCConnection connection object
Returns:
A VPC object that matches either an ID or CIDR and one or more tag values
"""
if vpc_id is None and cidr is None:
module.fail_json(
msg='You must specify either a vpc_id or a cidr block + list of unique tags, aborting'
)
found_vpcs = []
resource_tags = module.params.get('resource_tags')
# Check for existing VPC by cidr_block or id
if vpc_id is not None:
found_vpcs = vpc_conn.get_all_vpcs(None, {'vpc-id': vpc_id, 'state': 'available',})
else:
previous_vpcs = vpc_conn.get_all_vpcs(None, {'cidr': cidr, 'state': 'available'})
for vpc in previous_vpcs:
# Get all tags for each of the found VPCs
vpc_tags = dict((t.name, t.value) for t in vpc_conn.get_all_tags(filters={'resource-id': vpc.id}))
# If the supplied list of ID Tags match a subset of the VPC Tags, we found our VPC
if resource_tags and set(resource_tags.items()).issubset(set(vpc_tags.items())):
found_vpcs.append(vpc)
found_vpc = None
if len(found_vpcs) == 1:
found_vpc = found_vpcs[0]
if len(found_vpcs) > 1:
module.fail_json(msg='Found more than one vpc based on the supplied criteria, aborting')
return (found_vpc)
def create_vpc(module, vpc_conn):
"""
Creates a new or modifies an existing VPC.
module : AnsibleModule object
vpc_conn: authenticated VPCConnection connection object
Returns:
A dictionary with information
about the VPC and subnets that were launched
"""
id = module.params.get('vpc_id')
cidr_block = module.params.get('cidr_block')
instance_tenancy = module.params.get('instance_tenancy')
dns_support = module.params.get('dns_support')
dns_hostnames = module.params.get('dns_hostnames')
subnets = module.params.get('subnets')
internet_gateway = module.params.get('internet_gateway')
route_tables = module.params.get('route_tables')
vpc_spec_tags = module.params.get('resource_tags')
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
changed = False
# Check for existing VPC by cidr_block + tags or id
previous_vpc = find_vpc(module, vpc_conn, id, cidr_block)
if previous_vpc is not None:
changed = False
vpc = previous_vpc
else:
changed = True
try:
vpc = vpc_conn.create_vpc(cidr_block, instance_tenancy)
# wait here until the vpc is available
pending = True
wait_timeout = time.time() + wait_timeout
while wait and wait_timeout > time.time() and pending:
try:
pvpc = vpc_conn.get_all_vpcs(vpc.id)
if hasattr(pvpc, 'state'):
if pvpc.state == "available":
pending = False
elif hasattr(pvpc[0], 'state'):
if pvpc[0].state == "available":
pending = False
# sometimes vpc_conn.create_vpc() will return a vpc that can't be found yet by vpc_conn.get_all_vpcs()
# when that happens, just wait a bit longer and try again
except boto.exception.BotoServerError, e:
if e.error_code != 'InvalidVpcID.NotFound':
raise
if pending:
time.sleep(5)
if wait and wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg = "wait for vpc availability timeout on %s" % time.asctime())
except boto.exception.BotoServerError, e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
# Done with base VPC, now change to attributes and features.
# Add resource tags
vpc_tags = dict((t.name, t.value) for t in vpc_conn.get_all_tags(filters={'resource-id': vpc.id}))
if not set(vpc_spec_tags.items()).issubset(set(vpc_tags.items())):
new_tags = {}
for (key, value) in set(vpc_spec_tags.items()):
if (key, value) not in set(vpc_tags.items()):
new_tags[key] = value
if new_tags:
vpc_conn.create_tags(vpc.id, new_tags)
# boto doesn't appear to have a way to determine the existing
# value of the dns attributes, so we just set them.
# It also must be done one at a time.
vpc_conn.modify_vpc_attribute(vpc.id, enable_dns_support=dns_support)
vpc_conn.modify_vpc_attribute(vpc.id, enable_dns_hostnames=dns_hostnames)
# Process all subnet properties
if subnets is not None:
if not isinstance(subnets, list):
module.fail_json(msg='subnets needs to be a list of cidr blocks')
current_subnets = vpc_conn.get_all_subnets(filters={ 'vpc_id': vpc.id })
# First add all new subnets
for subnet in subnets:
add_subnet = True
for csn in current_subnets:
if subnet['cidr'] == csn.cidr_block:
add_subnet = False
if add_subnet:
try:
new_subnet = vpc_conn.create_subnet(vpc.id, subnet['cidr'], subnet.get('az', None))
new_subnet_tags = subnet.get('resource_tags', None)
if new_subnet_tags:
# Sometimes AWS takes its time to create a subnet, so using the new subnet's id
# to create tags results in an exception.
# boto doesn't seem to refresh the 'state' of the newly created subnet (it stays 'pending'),
# so we poll vpc_conn.get_all_subnets with the id of the newly added subnet instead.
while len(vpc_conn.get_all_subnets(filters={ 'subnet-id': new_subnet.id })) == 0:
time.sleep(0.1)
vpc_conn.create_tags(new_subnet.id, new_subnet_tags)
changed = True
except EC2ResponseError, e:
module.fail_json(msg='Unable to create subnet {0}, error: {1}'.format(subnet['cidr'], e))
# Now delete all absent subnets
for csubnet in current_subnets:
delete_subnet = True
for subnet in subnets:
if csubnet.cidr_block == subnet['cidr']:
delete_subnet = False
if delete_subnet:
try:
vpc_conn.delete_subnet(csubnet.id)
changed = True
except EC2ResponseError, e:
module.fail_json(msg='Unable to delete subnet {0}, error: {1}'.format(csubnet.cidr_block, e))
# Handle Internet gateway (create/delete igw)
igw = None
igws = vpc_conn.get_all_internet_gateways(filters={'attachment.vpc-id': vpc.id})
if len(igws) > 1:
module.fail_json(msg='EC2 returned more than one Internet Gateway for id %s, aborting' % vpc.id)
if internet_gateway:
if len(igws) != 1:
try:
igw = vpc_conn.create_internet_gateway()
vpc_conn.attach_internet_gateway(igw.id, vpc.id)
changed = True
except EC2ResponseError, e:
module.fail_json(msg='Unable to create Internet Gateway, error: {0}'.format(e))
else:
# Set igw variable to the current igw instance for use in route tables.
igw = igws[0]
else:
if len(igws) > 0:
try:
vpc_conn.detach_internet_gateway(igws[0].id, vpc.id)
vpc_conn.delete_internet_gateway(igws[0].id)
changed = True
except EC2ResponseError, e:
module.fail_json(msg='Unable to delete Internet Gateway, error: {0}'.format(e))
# Handle route tables - this may be worth splitting into a
# different module but should work fine here. The strategy to stay
# idempotent is to basically build all the route tables as
# defined, track the route table ids, and then run through the
# remote list of route tables and delete any that we didn't
# create. This shouldn't interrupt traffic in theory, but is the
# only way to really work with route tables over time that I can
# think of without using painful aws ids. Hopefully boto will add
# the replace-route-table API to make this smoother and
# allow control of the 'main' routing table.
if route_tables is not None:
if not isinstance(route_tables, list):
module.fail_json(msg='route tables need to be a list of dictionaries')
# Work through each route table and update/create to match dictionary array
all_route_tables = []
for rt in route_tables:
try:
new_rt = vpc_conn.create_route_table(vpc.id)
for route in rt['routes']:
route_kwargs = {}
if route['gw'] == 'igw':
if not internet_gateway:
module.fail_json(
msg='You asked for an Internet Gateway ' \
'(igw) route, but you have no Internet Gateway'
)
route_kwargs['gateway_id'] = igw.id
elif route['gw'].startswith('i-'):
route_kwargs['instance_id'] = route['gw']
else:
route_kwargs['gateway_id'] = route['gw']
vpc_conn.create_route(new_rt.id, route['dest'], **route_kwargs)
# Associate with subnets
for sn in rt['subnets']:
rsn = vpc_conn.get_all_subnets(filters={'cidr': sn, 'vpc_id': vpc.id })
if len(rsn) != 1:
module.fail_json(
msg='The subnet {0} to associate with route_table {1} ' \
'does not exist, aborting'.format(sn, rt)
)
rsn = rsn[0]
# Disassociate then associate since we don't have replace
old_rt = vpc_conn.get_all_route_tables(
filters={'association.subnet_id': rsn.id, 'vpc_id': vpc.id}
)
old_rt = [ x for x in old_rt if x.id != None ]
if len(old_rt) == 1:
old_rt = old_rt[0]
association_id = None
for a in old_rt.associations:
if a.subnet_id == rsn.id:
association_id = a.id
vpc_conn.disassociate_route_table(association_id)
vpc_conn.associate_route_table(new_rt.id, rsn.id)
all_route_tables.append(new_rt)
changed = True
except EC2ResponseError, e:
module.fail_json(
msg='Unable to create and associate route table {0}, error: ' \
'{1}'.format(rt, e)
)
# Now that we are good to go on our new route tables, delete the
# old ones except the 'main' route table as boto can't set the main
# table yet.
all_rts = vpc_conn.get_all_route_tables(filters={'vpc-id': vpc.id})
for rt in all_rts:
if rt.id is None:
continue
delete_rt = True
for newrt in all_route_tables:
if newrt.id == rt.id:
delete_rt = False
break
if delete_rt:
rta = rt.associations
is_main = False
for a in rta:
if a.main:
is_main = True
break
try:
if not is_main:
vpc_conn.delete_route_table(rt.id)
changed = True
except EC2ResponseError, e:
module.fail_json(msg='Unable to delete old route table {0}, error: {1}'.format(rt.id, e))
vpc_dict = get_vpc_info(vpc)
created_vpc_id = vpc.id
returned_subnets = []
current_subnets = vpc_conn.get_all_subnets(filters={ 'vpc_id': vpc.id })
for sn in current_subnets:
returned_subnets.append({
'resource_tags': dict((t.name, t.value) for t in vpc_conn.get_all_tags(filters={'resource-id': sn.id})),
'cidr': sn.cidr_block,
'az': sn.availability_zone,
'id': sn.id,
})
return (vpc_dict, created_vpc_id, returned_subnets, changed)
def terminate_vpc(module, vpc_conn, vpc_id=None, cidr=None):
"""
Terminates a VPC
module: Ansible module object
vpc_conn: authenticated VPCConnection connection object
vpc_id: a vpc id to terminate
cidr: The cidr block of the VPC - can be used in lieu of an ID
Returns a dictionary of VPC information
about the VPC terminated.
If the VPC to be terminated is available
"changed" will be set to True.
"""
vpc_dict = {}
terminated_vpc_id = ''
changed = False
vpc = find_vpc(module, vpc_conn, vpc_id, cidr)
if vpc is not None:
if vpc.state == 'available':
terminated_vpc_id=vpc.id
vpc_dict=get_vpc_info(vpc)
try:
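# AWS refuses to delete a VPC while dependencies remain, so remove
# subnets, internet gateways and non-main route tables first.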
subnets = vpc_conn.get_all_subnets(filters={'vpc_id': vpc.id})
for sn in subnets:
vpc_conn.delete_subnet(sn.id)
igws = vpc_conn.get_all_internet_gateways(
filters={'attachment.vpc-id': vpc.id}
)
for igw in igws:
vpc_conn.detach_internet_gateway(igw.id, vpc.id)
vpc_conn.delete_internet_gateway(igw.id)
rts = vpc_conn.get_all_route_tables(filters={'vpc_id': vpc.id})
for rt in rts:
rta = rt.associations
is_main = False
for a in rta:
if a.main:
is_main = True
if not is_main:
vpc_conn.delete_route_table(rt.id)
vpc_conn.delete_vpc(vpc.id)
except EC2ResponseError, e:
module.fail_json(
msg='Unable to delete VPC {0}, error: {1}'.format(vpc.id, e)
)
changed = True
return (changed, vpc_dict, terminated_vpc_id)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
cidr_block = dict(),
instance_tenancy = dict(choices=['default', 'dedicated'], default='default'),
wait = dict(type='bool', default=False),
wait_timeout = dict(default=300),
dns_support = dict(type='bool', default=True),
dns_hostnames = dict(type='bool', default=True),
subnets = dict(type='list'),
vpc_id = dict(),
internet_gateway = dict(type='bool', default=False),
resource_tags = dict(type='dict', required=True),
route_tables = dict(type='list'),
state = dict(choices=['present', 'absent'], default='present'),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
)
state = module.params.get('state')
ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module)
# If we have a region specified, connect to its endpoint.
if region:
try:
vpc_conn = boto.vpc.connect_to_region(
region,
aws_access_key_id=aws_access_key,
aws_secret_access_key=aws_secret_key
)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg = str(e))
else:
module.fail_json(msg="region must be specified")
if module.params.get('state') == 'absent':
vpc_id = module.params.get('vpc_id')
cidr = module.params.get('cidr_block')
(changed, vpc_dict, new_vpc_id) = terminate_vpc(module, vpc_conn, vpc_id, cidr)
subnets_changed = None
elif module.params.get('state') == 'present':
# Changed is always set to true when provisioning a new VPC
(vpc_dict, new_vpc_id, subnets_changed, changed) = create_vpc(module, vpc_conn)
module.exit_json(changed=changed, vpc_id=new_vpc_id, vpc=vpc_dict, subnets=subnets_changed)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()

@ -1,547 +0,0 @@
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
---
module: elasticache
short_description: Manage cache clusters in Amazon Elasticache.
description:
- Manage cache clusters in Amazon Elasticache.
- Returns information about the specified cache cluster.
version_added: "1.4"
requirements: [ "boto" ]
author: Jim Dalton
options:
state:
description:
- C(absent) or C(present) are idempotent actions that will create or destroy a cache cluster as needed. C(rebooted) will reboot the cluster, resulting in a momentary outage.
choices: ['present', 'absent', 'rebooted']
required: true
name:
description:
- The cache cluster identifier
required: true
engine:
description:
- Name of the cache engine to be used (memcached or redis)
required: false
default: memcached
cache_engine_version:
description:
- The version number of the cache engine
required: false
default: 1.4.14
node_type:
description:
- The compute and memory capacity of the nodes in the cache cluster
required: false
default: cache.m1.small
num_nodes:
description:
- The initial number of cache nodes that the cache cluster will have
required: false
cache_port:
description:
- The port number on which each of the cache nodes will accept connections
required: false
default: 11211
security_group_ids:
description:
- A list of vpc security group names to associate with this cache cluster. Only use if inside a vpc
required: false
default: ['default']
version_added: "1.6"
cache_security_groups:
description:
- A list of cache security group names to associate with this cache cluster
required: false
default: ['default']
zone:
description:
- The EC2 Availability Zone in which the cache cluster will be created
required: false
default: None
wait:
description:
- Wait for cache cluster result before returning
required: false
default: yes
choices: [ "yes", "no" ]
hard_modify:
description:
- Whether to destroy and recreate an existing cache cluster if necessary in order to modify its state
required: false
default: no
choices: [ "yes", "no" ]
aws_secret_key:
description:
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
required: false
default: None
aliases: ['ec2_secret_key', 'secret_key']
aws_access_key:
description:
- AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
required: false
default: None
aliases: ['ec2_access_key', 'access_key']
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: false
aliases: ['aws_region', 'ec2_region']
"""
EXAMPLES = """
# Note: None of these examples set aws_access_key, aws_secret_key, or region.
# It is assumed that their matching environment variables are set.
# Basic example
- local_action:
module: elasticache
name: "test-please-delete"
state: present
engine: memcached
cache_engine_version: 1.4.14
node_type: cache.m1.small
num_nodes: 1
cache_port: 11211
cache_security_groups:
- default
zone: us-east-1d
# Ensure cache cluster is gone
- local_action:
module: elasticache
name: "test-please-delete"
state: absent
# Reboot cache cluster
- local_action:
module: elasticache
name: "test-please-delete"
state: rebooted
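# Grow the basic cluster above to two nodes (illustrative; removing nodes
# would additionally require hard_modify: yes)
- local_action:
module: elasticache
name: "test-please-delete"
state: present
num_nodes: 2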
"""
import sys
import os
import time
try:
import boto
from boto.elasticache.layer1 import ElastiCacheConnection
from boto.regioninfo import RegionInfo
except ImportError:
print "failed=True msg='boto required for this module'"
sys.exit(1)
class ElastiCacheManager(object):
"""Handles elasticache creation and destruction"""
EXIST_STATUSES = ['available', 'creating', 'rebooting', 'modifying']
def __init__(self, module, name, engine, cache_engine_version, node_type,
num_nodes, cache_port, cache_security_groups, security_group_ids, zone, wait,
hard_modify, aws_access_key, aws_secret_key, region):
self.module = module
self.name = name
self.engine = engine
self.cache_engine_version = cache_engine_version
self.node_type = node_type
self.num_nodes = num_nodes
self.cache_port = cache_port
self.cache_security_groups = cache_security_groups
self.security_group_ids = security_group_ids
self.zone = zone
self.wait = wait
self.hard_modify = hard_modify
self.aws_access_key = aws_access_key
self.aws_secret_key = aws_secret_key
self.region = region
self.changed = False
self.data = None
self.status = 'gone'
self.conn = self._get_elasticache_connection()
self._refresh_data()
def ensure_present(self):
"""Ensure cache cluster exists or create it if not"""
if self.exists():
self.sync()
else:
self.create()
def ensure_absent(self):
"""Ensure cache cluster is gone or delete it if not"""
self.delete()
def ensure_rebooted(self):
"""Ensure cache cluster is gone or delete it if not"""
self.reboot()
def exists(self):
"""Check if cache cluster exists"""
return self.status in self.EXIST_STATUSES
def create(self):
"""Create an ElastiCache cluster"""
if self.status == 'available':
return
if self.status in ['creating', 'rebooting', 'modifying']:
if self.wait:
self._wait_for_status('available')
return
if self.status == 'deleting':
if self.wait:
self._wait_for_status('gone')
else:
msg = "'%s' is currently deleting. Cannot create."
self.module.fail_json(msg=msg % self.name)
try:
response = self.conn.create_cache_cluster(cache_cluster_id=self.name,
num_cache_nodes=self.num_nodes,
cache_node_type=self.node_type,
engine=self.engine,
engine_version=self.cache_engine_version,
cache_security_group_names=self.cache_security_groups,
security_group_ids=self.security_group_ids,
preferred_availability_zone=self.zone,
port=self.cache_port)
except boto.exception.BotoServerError, e:
self.module.fail_json(msg=e.message)
cache_cluster_data = response['CreateCacheClusterResponse']['CreateCacheClusterResult']['CacheCluster']
self._refresh_data(cache_cluster_data)
self.changed = True
if self.wait:
self._wait_for_status('available')
return True
def delete(self):
"""Destroy an ElastiCache cluster"""
if self.status == 'gone':
return
if self.status == 'deleting':
if self.wait:
self._wait_for_status('gone')
return
if self.status in ['creating', 'rebooting', 'modifying']:
if self.wait:
self._wait_for_status('available')
else:
msg = "'%s' is currently %s. Cannot delete."
self.module.fail_json(msg=msg % (self.name, self.status))
try:
response = self.conn.delete_cache_cluster(cache_cluster_id=self.name)
except boto.exception.BotoServerError, e:
self.module.fail_json(msg=e.message)
cache_cluster_data = response['DeleteCacheClusterResponse']['DeleteCacheClusterResult']['CacheCluster']
self._refresh_data(cache_cluster_data)
self.changed = True
if self.wait:
self._wait_for_status('gone')
def sync(self):
"""Sync settings to cluster if required"""
if not self.exists():
msg = "'%s' is %s. Cannot sync."
self.module.fail_json(msg=msg % (self.name, self.status))
if self.status in ['creating', 'rebooting', 'modifying']:
if self.wait:
self._wait_for_status('available')
else:
# Cluster can only be synced if available. If we can't wait
# for this, then just be done.
return
if self._requires_destroy_and_create():
if not self.hard_modify:
msg = "'%s' requires destructive modification. 'hard_modify' must be set to true to proceed."
self.module.fail_json(msg=msg % self.name)
if not self.wait:
msg = "'%s' requires destructive modification. 'wait' must be set to true."
self.module.fail_json(msg=msg % self.name)
self.delete()
self.create()
return
if self._requires_modification():
self.modify()
def modify(self):
"""Modify the cache cluster. Note it's only possible to modify a few select options."""
nodes_to_remove = self._get_nodes_to_remove()
try:
response = self.conn.modify_cache_cluster(cache_cluster_id=self.name,
num_cache_nodes=self.num_nodes,
cache_node_ids_to_remove=nodes_to_remove,
cache_security_group_names=self.cache_security_groups,
security_group_ids=self.security_group_ids,
apply_immediately=True,
engine_version=self.cache_engine_version)
except boto.exception.BotoServerError, e:
self.module.fail_json(msg=e.message)
cache_cluster_data = response['ModifyCacheClusterResponse']['ModifyCacheClusterResult']['CacheCluster']
self._refresh_data(cache_cluster_data)
self.changed = True
if self.wait:
self._wait_for_status('available')
def reboot(self):
"""Reboot the cache cluster"""
if not self.exists():
msg = "'%s' is %s. Cannot reboot."
self.module.fail_json(msg=msg % (self.name, self.status))
if self.status == 'rebooting':
return
if self.status in ['creating', 'modifying']:
if self.wait:
self._wait_for_status('available')
else:
msg = "'%s' is currently %s. Cannot reboot."
self.module.fail_json(msg=msg % (self.name, self.status))
# Collect ALL nodes for reboot
cache_node_ids = [cn['CacheNodeId'] for cn in self.data['CacheNodes']]
try:
response = self.conn.reboot_cache_cluster(cache_cluster_id=self.name,
cache_node_ids_to_reboot=cache_node_ids)
except boto.exception.BotoServerError, e:
self.module.fail_json(msg=e.message)
cache_cluster_data = response['RebootCacheClusterResponse']['RebootCacheClusterResult']['CacheCluster']
self._refresh_data(cache_cluster_data)
self.changed = True
if self.wait:
self._wait_for_status('available')
def get_info(self):
"""Return basic info about the cache cluster"""
info = {
'name': self.name,
'status': self.status
}
if self.data:
info['data'] = self.data
return info
def _wait_for_status(self, awaited_status):
"""Wait for status to change from present status to awaited_status"""
status_map = {
'creating': 'available',
'rebooting': 'available',
'modifying': 'available',
'deleting': 'gone'
}
if status_map[self.status] != awaited_status:
msg = "Invalid awaited status. '%s' cannot transition to '%s'"
self.module.fail_json(msg=msg % (self.status, awaited_status))
if awaited_status not in set(status_map.values()):
msg = "'%s' is not a valid awaited status."
self.module.fail_json(msg=msg % awaited_status)
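# Poll once per second until the cluster reaches the awaited status.
# Note there is no timeout; this relies on AWS eventually converging.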
while True:
time.sleep(1)
self._refresh_data()
if self.status == awaited_status:
break
def _requires_modification(self):
"""Check if cluster requires (nondestructive) modification"""
# Check modifiable data attributes
modifiable_data = {
'NumCacheNodes': self.num_nodes,
'EngineVersion': self.cache_engine_version
}
for key, value in modifiable_data.iteritems():
if self.data[key] != value:
return True
# Check cache security groups
cache_security_groups = []
for sg in self.data['CacheSecurityGroups']:
cache_security_groups.append(sg['CacheSecurityGroupName'])
if set(cache_security_groups) - set(self.cache_security_groups):
return True
# check vpc security groups
vpc_security_groups = []
security_groups = self.data['SecurityGroups'] or []
for sg in security_groups:
vpc_security_groups.append(sg['SecurityGroupId'])
if set(vpc_security_groups) - set(self.security_group_ids):
return True
return False
def _requires_destroy_and_create(self):
"""
Check whether a destroy and create is required to synchronize cluster.
"""
unmodifiable_data = {
'node_type': self.data['CacheNodeType'],
'engine': self.data['Engine'],
'cache_port': self._get_port()
}
# Only check for modifications if zone is specified
if self.zone is not None:
unmodifiable_data['zone'] = self.data['PreferredAvailabilityZone']
for key, value in unmodifiable_data.iteritems():
if getattr(self, key) != value:
return True
return False
def _get_elasticache_connection(self):
"""Get an elasticache connection"""
try:
endpoint = "elasticache.%s.amazonaws.com" % self.region
connect_region = RegionInfo(name=self.region, endpoint=endpoint)
return ElastiCacheConnection(aws_access_key_id=self.aws_access_key,
aws_secret_access_key=self.aws_secret_key,
region=connect_region)
except boto.exception.NoAuthHandlerFound, e:
self.module.fail_json(msg=e.message)
def _get_port(self):
"""Get the port. Where this information is retrieved from is engine dependent."""
if self.data['Engine'] == 'memcached':
return self.data['ConfigurationEndpoint']['Port']
elif self.data['Engine'] == 'redis':
# Redis only supports a single node (presently) so just use
# the first and only
return self.data['CacheNodes'][0]['Endpoint']['Port']
def _refresh_data(self, cache_cluster_data=None):
"""Refresh data about this cache cluster"""
if cache_cluster_data is None:
try:
response = self.conn.describe_cache_clusters(cache_cluster_id=self.name,
show_cache_node_info=True)
except boto.exception.BotoServerError:
self.data = None
self.status = 'gone'
return
cache_cluster_data = response['DescribeCacheClustersResponse']['DescribeCacheClustersResult']['CacheClusters'][0]
self.data = cache_cluster_data
self.status = self.data['CacheClusterStatus']
# The documentation for elasticache lies -- status on rebooting is set
# to 'rebooting cache cluster nodes' instead of 'rebooting'. Fix it
# here to make status checks etc. more sane.
if self.status == 'rebooting cache cluster nodes':
self.status = 'rebooting'
def _get_nodes_to_remove(self):
"""If there are nodes to remove, it figures out which need to be removed"""
num_nodes_to_remove = self.data['NumCacheNodes'] - self.num_nodes
if num_nodes_to_remove <= 0:
return None
if not self.hard_modify:
msg = "'%s' requires removal of cache nodes. 'hard_modify' must be set to true to proceed."
self.module.fail_json(msg=msg % self.name)
cache_node_ids = [cn['CacheNodeId'] for cn in self.data['CacheNodes']]
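# e.g. (illustrative): with node ids ['0001', '0002', '0003'] and
# num_nodes=1, the slice below returns ['0002', '0003'] for removal.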
return cache_node_ids[-num_nodes_to_remove:]
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state={'required': True, 'choices': ['present', 'absent', 'rebooted']},
name={'required': True},
engine={'required': False, 'default': 'memcached'},
cache_engine_version={'required': False, 'default': '1.4.14'},
node_type={'required': False, 'default': 'cache.m1.small'},
num_nodes={'required': False, 'default': None, 'type': 'int'},
cache_port={'required': False, 'default': 11211, 'type': 'int'},
cache_security_groups={'required': False, 'default': ['default'],
'type': 'list'},
security_group_ids={'required': False, 'default': [],
'type': 'list'},
zone={'required': False, 'default': None},
wait={'required': False, 'type' : 'bool', 'default': True},
hard_modify={'required': False, 'type': 'bool', 'default': False}
)
)
module = AnsibleModule(
argument_spec=argument_spec,
)
ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module)
name = module.params['name']
state = module.params['state']
engine = module.params['engine']
cache_engine_version = module.params['cache_engine_version']
node_type = module.params['node_type']
num_nodes = module.params['num_nodes']
cache_port = module.params['cache_port']
cache_security_groups = module.params['cache_security_groups']
security_group_ids = module.params['security_group_ids']
zone = module.params['zone']
wait = module.params['wait']
hard_modify = module.params['hard_modify']
if state == 'present' and not num_nodes:
module.fail_json(msg="'num_nodes' is a required parameter. Please specify num_nodes > 0")
if not region:
module.fail_json(msg=str("Either region or EC2_REGION environment variable must be set."))
elasticache_manager = ElastiCacheManager(module, name, engine,
cache_engine_version, node_type,
num_nodes, cache_port,
cache_security_groups,
security_group_ids, zone, wait,
hard_modify, aws_access_key,
aws_secret_key, region)
if state == 'present':
elasticache_manager.ensure_present()
elif state == 'absent':
elasticache_manager.ensure_absent()
elif state == 'rebooted':
elasticache_manager.ensure_rebooted()
facts_result = dict(changed=elasticache_manager.changed,
elasticache=elasticache_manager.get_info())
module.exit_json(**facts_result)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()

@ -1,420 +0,0 @@
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: gc_storage
version_added: "1.4"
short_description: This module manages objects/buckets in Google Cloud Storage.
description:
- This module allows users to manage their objects/buckets in Google Cloud Storage. It allows upload and download operations and can set some canned permissions. It also allows retrieval of URLs for objects for use in playbooks, and retrieval of string contents of objects. This module requires setting the default project in GCS prior to playbook usage. See U(https://developers.google.com/storage/docs/reference/v1/apiversion1) for information about setting the default project.
options:
bucket:
description:
- Bucket name.
required: true
default: null
aliases: []
object:
description:
- Keyname of the object inside the bucket. Can also be used to create "virtual directories" (see examples).
required: false
default: null
aliases: []
src:
description:
- The source file path when performing a PUT operation.
required: false
default: null
aliases: []
dest:
description:
- The destination file path when downloading an object/key with a GET operation.
required: false
aliases: []
force:
description:
- Forces an overwrite either locally on the filesystem or remotely with the object/key. Used with PUT and GET operations.
required: false
default: true
aliases: [ 'overwrite' ]
permission:
description:
- This option lets the user set the canned permissions on the object/bucket that are created. The permissions that can be set are 'private', 'public-read', 'authenticated-read'.
required: false
default: private
expiration:
description:
- Time limit (in seconds) for the URL generated and returned by GCS when performing a mode=put or mode=get_url operation. This URL is only available when public-read is the ACL for the object.
required: false
default: null
aliases: []
mode:
description:
- Switches the module behaviour between upload, download, get_url (return download url), get_str (download object as string), create (bucket) and delete (bucket).
required: true
default: null
aliases: []
choices: [ 'get', 'put', 'get_url', 'get_str', 'delete', 'create' ]
gs_secret_key:
description:
- GCS secret key. If not set then the value of the GCS_SECRET_KEY environment variable is used.
required: true
default: null
gs_access_key:
description:
- GCS access key. If not set then the value of the GCS_ACCESS_KEY environment variable is used.
required: true
default: null
requirements: [ "boto 2.9+" ]
author: benno@ansible.com (note: most of the code has been taken from the S3 module)
'''
EXAMPLES = '''
# upload some content
- gc_storage: bucket=mybucket object=key.txt src=/usr/local/myfile.txt mode=put permission=public-read
# download some content
- gc_storage: bucket=mybucket object=key.txt dest=/usr/local/myfile.txt mode=get
# Download an object as a string to use else where in your playbook
- gc_storage: bucket=mybucket object=key.txt mode=get_str
# Create an empty bucket
- gc_storage: bucket=mybucket mode=create
# Create a bucket with key as directory
- gc_storage: bucket=mybucket object=/my/directory/path mode=create
# Delete a bucket and all contents
- gc_storage: bucket=mybucket mode=delete
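# Get a time-limited download url for an existing object (values illustrative)
- gc_storage: bucket=mybucket object=key.txt mode=get_url expiration=600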
'''
import sys
import os
import urlparse
import hashlib
try:
import boto
except ImportError:
print "failed=True msg='boto 2.9+ required for this module'"
sys.exit(1)
def grant_check(module, gs, obj):
try:
acp = obj.get_acl()
if module.params.get('permission') == 'public-read':
grant = [ x for x in acp.entries.entry_list if x.scope.type == 'AllUsers']
if not grant:
obj.set_acl('public-read')
module.exit_json(changed=True, result="The objects permission as been set to public-read")
if module.params.get('permission') == 'authenticated-read':
grant = [ x for x in acp.entries.entry_list if x.scope.type == 'AllAuthenticatedUsers']
if not grant:
obj.set_acl('authenticated-read')
module.exit_json(changed=True, result="The objects permission as been set to authenticated-read")
except gs.provider.storage_response_error, e:
module.fail_json(msg= str(e))
return True
def key_check(module, gs, bucket, obj):
try:
bucket = gs.lookup(bucket)
key_check = bucket.get_key(obj)
except gs.provider.storage_response_error, e:
module.fail_json(msg= str(e))
if key_check:
grant_check(module, gs, key_check)
return True
else:
return False
def keysum(module, gs, bucket, obj):
bucket = gs.lookup(bucket)
key_check = bucket.get_key(obj)
if not key_check:
return None
md5_remote = key_check.etag[1:-1]
etag_multipart = '-' in md5_remote # Check for multipart, etag is not md5
if etag_multipart is True:
module.fail_json(msg="Files uploaded with multipart of gs are not supported with checksum, unable to compute checksum.")
return md5_remote
def bucket_check(module, gs, bucket):
try:
result = gs.lookup(bucket)
except gs.provider.storage_response_error, e:
module.fail_json(msg= str(e))
if result:
grant_check(module, gs, result)
return True
else:
return False
def create_bucket(module, gs, bucket):
try:
bucket = gs.create_bucket(bucket)
bucket.set_acl(module.params.get('permission'))
except gs.provider.storage_response_error, e:
module.fail_json(msg= str(e))
if bucket:
return True
def delete_bucket(module, gs, bucket):
try:
bucket = gs.lookup(bucket)
bucket_contents = bucket.list()
for key in bucket_contents:
bucket.delete_key(key.name)
bucket.delete()
return True
except gs.provider.storage_response_error, e:
module.fail_json(msg= str(e))
def delete_key(module, gs, bucket, obj):
try:
bucket = gs.lookup(bucket)
bucket.delete_key(obj)
module.exit_json(msg="Object deleted from bucket ", changed=True)
except gs.provider.storage_response_error, e:
module.fail_json(msg= str(e))
def create_dirkey(module, gs, bucket, obj):
try:
bucket = gs.lookup(bucket)
key = bucket.new_key(obj)
key.set_contents_from_string('')
module.exit_json(msg="Virtual directory %s created in bucket %s" % (obj, bucket.name), changed=True)
except gs.provider.storage_response_error, e:
module.fail_json(msg= str(e))
def upload_file_check(src):
if os.path.exists(src):
file_exists = True
else:
file_exists = False
if os.path.isdir(src):
module.fail_json(msg="Specifying a directory is not a valid source for upload.", failed=True)
return file_exists
def path_check(path):
if os.path.exists(path):
return True
else:
return False
def upload_gsfile(module, gs, bucket, obj, src, expiry):
try:
bucket = gs.lookup(bucket)
key = bucket.new_key(obj)
key.set_contents_from_filename(src)
key.set_acl(module.params.get('permission'))
url = key.generate_url(expiry)
module.exit_json(msg="PUT operation complete", url=url, changed=True)
except gs.provider.storage_copy_error, e:
module.fail_json(msg= str(e))
def download_gsfile(module, gs, bucket, obj, dest):
try:
bucket = gs.lookup(bucket)
key = bucket.lookup(obj)
key.get_contents_to_filename(dest)
module.exit_json(msg="GET operation complete", changed=True)
except gs.provider.storage_copy_error, e:
module.fail_json(msg= str(e))
def download_gsstr(module, gs, bucket, obj):
try:
bucket = gs.lookup(bucket)
key = bucket.lookup(obj)
contents = key.get_contents_as_string()
module.exit_json(msg="GET operation complete", contents=contents, changed=True)
except gs.provider.storage_copy_error, e:
module.fail_json(msg= str(e))
def get_download_url(module, gs, bucket, obj, expiry):
try:
bucket = gs.lookup(bucket)
key = bucket.lookup(obj)
url = key.generate_url(expiry)
module.exit_json(msg="Download url:", url=url, expiration=expiry, changed=True)
except gs.provider.storage_response_error, e:
module.fail_json(msg= str(e))
def handle_get(module, gs, bucket, obj, overwrite, dest):
md5_remote = keysum(module, gs, bucket, obj)
md5_local = hashlib.md5(open(dest, 'rb').read()).hexdigest()
if md5_local == md5_remote:
module.exit_json(changed=False)
if md5_local != md5_remote and not overwrite:
module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force download.", failed=True)
else:
download_gsfile(module, gs, bucket, obj, dest)
def handle_put(module, gs, bucket, obj, overwrite, src, expiration):
# Let's check whether the bucket exists to get ground truth.
bucket_rc = bucket_check(module, gs, bucket)
key_rc = key_check(module, gs, bucket, obj)
# Let's check the key's state. If it exists, compute the etag md5sum.
if bucket_rc and key_rc:
md5_remote = keysum(module, gs, bucket, obj)
md5_local = hashlib.md5(open(src, 'rb').read()).hexdigest()
if md5_local == md5_remote:
module.exit_json(msg="Local and remote object are identical", changed=False)
if md5_local != md5_remote and not overwrite:
module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force upload.", failed=True)
else:
upload_gsfile(module, gs, bucket, obj, src, expiration)
if not bucket_rc:
create_bucket(module, gs, bucket)
upload_gsfile(module, gs, bucket, obj, src, expiration)
# If bucket exists but key doesn't, just upload.
if bucket_rc and not key_rc:
upload_gsfile(module, gs, bucket, obj, src, expiration)
def handle_delete(module, gs, bucket, obj):
if bucket and not obj:
if bucket_check(module, gs, bucket):
module.exit_json(msg="Bucket %s and all keys have been deleted."%bucket, changed=delete_bucket(module, gs, bucket))
else:
module.exit_json(msg="Bucket does not exist.", changed=False)
if bucket and obj:
if bucket_check(module, gs, bucket):
if key_check(module, gs, bucket, obj):
module.exit_json(msg="Object has been deleted.", changed=delete_key(module, gs, bucket, obj))
else:
module.exit_json(msg="Object does not exists.", changed=False)
else:
module.exit_json(msg="Bucket does not exist.", changed=False)
else:
module.fail_json(msg="Bucket or Bucket & object parameter is required.", failed=True)
def handle_create(module, gs, bucket, obj):
if bucket and not obj:
if bucket_check(module, gs, bucket):
module.exit_json(msg="Bucket already exists.", changed=False)
else:
module.exit_json(msg="Bucket created successfully", changed=create_bucket(module, gs, bucket))
if bucket and obj:
# Compute the directory key up front so both branches below can use it.
if obj.endswith('/'):
dirobj = obj
else:
dirobj = obj + "/"
if bucket_check(module, gs, bucket):
if key_check(module, gs, bucket, dirobj):
module.exit_json(msg="Bucket %s and key %s already exist."% (bucket, obj), changed=False)
else:
create_dirkey(module, gs, bucket, dirobj)
else:
create_bucket(module, gs, bucket)
create_dirkey(module, gs, bucket, dirobj)
def main():
module = AnsibleModule(
argument_spec = dict(
bucket = dict(required=True),
object = dict(default=None),
src = dict(default=None),
dest = dict(default=None),
expiration = dict(default=600, aliases=['expiry']),
mode = dict(choices=['get', 'put', 'delete', 'create', 'get_url', 'get_str'], required=True),
permission = dict(choices=['private', 'public-read', 'authenticated-read'], default='private'),
gs_secret_key = dict(no_log=True, required=True),
gs_access_key = dict(required=True),
overwrite = dict(default=True, type='bool', aliases=['force']),
),
)
bucket = module.params.get('bucket')
obj = module.params.get('object')
src = module.params.get('src')
dest = module.params.get('dest')
if dest:
dest = os.path.expanduser(dest)
mode = module.params.get('mode')
expiry = module.params.get('expiration')
gs_secret_key = module.params.get('gs_secret_key')
gs_access_key = module.params.get('gs_access_key')
overwrite = module.params.get('overwrite')
if mode == 'put':
if not src or not obj:
module.fail_json(msg="When using PUT, src, bucket, object are mandatory parameters")
if mode == 'get':
if not dest or not obj:
module.fail_json(msg="When using GET, dest, bucket, object are mandatory parameters")
if obj:
obj = os.path.expanduser(module.params['object'])
try:
gs = boto.connect_gs(gs_access_key, gs_secret_key)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg = str(e))
if mode == 'get':
if not bucket_check(module, gs, bucket) or not key_check(module, gs, bucket, obj):
module.fail_json(msg="Target bucket/key cannot be found", failed=True)
if not path_check(dest):
download_gsfile(module, gs, bucket, obj, dest)
else:
handle_get(module, gs, bucket, obj, overwrite, dest)
if mode == 'put':
if not path_check(src):
module.fail_json(msg="Local object for PUT does not exist", failed=True)
handle_put(module, gs, bucket, obj, overwrite, src, expiry)
# Support for deleting an object if we have both params.
if mode == 'delete':
handle_delete(module, gs, bucket, obj)
if mode == 'create':
handle_create(module, gs, bucket, obj)
if mode == 'get_url':
if bucket and obj:
if bucket_check(module, gs, bucket) and key_check(module, gs, bucket, obj):
get_download_url(module, gs, bucket, obj, expiry)
else:
module.fail_json(msg="Key/Bucket does not exist", failed=True)
else:
module.fail_json(msg="Bucket and Object parameters must be set", failed=True)
# --------------------------- Get the String contents of an Object -------------------------
if mode == 'get_str':
if bucket and obj:
if bucket_check(module, gs, bucket) and key_check(module, gs, bucket, obj):
download_gsstr(module, gs, bucket, obj)
else:
module.fail_json(msg="Key/Bucket does not exist", failed=True)
else:
module.fail_json(msg="Bucket and Object parameters must be set", failed=True)
# import module snippets
from ansible.module_utils.basic import *
main()

@ -1,474 +0,0 @@
#!/usr/bin/python
# Copyright 2013 Google Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: gce
version_added: "1.4"
short_description: create or terminate GCE instances
description:
- Creates or terminates Google Compute Engine (GCE) instances. See
U(https://cloud.google.com/products/compute-engine) for an overview.
Full install/configuration instructions for the gce* modules can
be found in the comments of ansible/test/gce_tests.py.
options:
image:
description:
- image string to use for the instance
required: false
default: "debian-7"
aliases: []
instance_names:
description:
- a comma-separated list of instance names to create or destroy
required: false
default: null
aliases: []
machine_type:
description:
- machine type to use for the instance; 'n1-standard-1' is used by default
required: false
default: "n1-standard-1"
aliases: []
metadata:
description:
- a hash/dictionary of custom data for the instance; '{"key":"value",...}'
required: false
default: null
aliases: []
service_account_email:
version_added: 1.5.1
description:
- service account email
required: false
default: null
aliases: []
pem_file:
version_added: 1.5.1
description:
- path to the pem file associated with the service account email
required: false
default: null
aliases: []
project_id:
version_added: 1.5.1
description:
- your GCE project ID
required: false
default: null
aliases: []
name:
description:
- identifier when working with a single instance
required: false
aliases: []
network:
description:
- name of the network, 'default' will be used if not specified
required: false
default: "default"
aliases: []
persistent_boot_disk:
description:
- if set, create the instance with a persistent boot disk
required: false
default: "false"
aliases: []
disks:
description:
- a list of persistent disks to attach to the instance; a string value gives the name of the disk; alternatively, a dictionary value can define 'name' and 'mode' ('READ_ONLY' or 'READ_WRITE'). The first entry will be the boot disk (which must be READ_WRITE).
required: false
default: null
aliases: []
version_added: "1.7"
state:
description:
- desired state of the resource
required: false
default: "present"
choices: ["active", "present", "absent", "deleted"]
aliases: []
tags:
description:
- a comma-separated list of tags to associate with the instance
required: false
default: null
aliases: []
zone:
description:
- the GCE zone to use
required: true
default: "us-central1-a"
aliases: []
requirements: [ "libcloud" ]
notes:
- Either I(name) or I(instance_names) is required.
author: Eric Johnson <erjohnso@google.com>
'''
EXAMPLES = '''
# Basic provisioning example. Create a single Debian 7 instance in the
# us-central1-a Zone of n1-standard-1 machine type.
- local_action:
module: gce
name: test-instance
zone: us-central1-a
machine_type: n1-standard-1
image: debian-7
# Example using defaults and with metadata to create a single 'foo' instance
- local_action:
module: gce
name: foo
metadata: '{"db":"postgres", "group":"qa", "id":500}'
# Launch instances from a control node, run some tasks on the new instances,
# and then terminate them
- name: Create a sandbox instance
hosts: localhost
vars:
names: foo,bar
machine_type: n1-standard-1
image: debian-6
zone: us-central1-a
service_account_email: unique-email@developer.gserviceaccount.com
pem_file: /path/to/pem_file
project_id: project-id
tasks:
- name: Launch instances
local_action: gce instance_names={{names}} machine_type={{machine_type}}
image={{image}} zone={{zone}} service_account_email={{ service_account_email }}
pem_file={{ pem_file }} project_id={{ project_id }}
register: gce
- name: Wait for SSH to come up
local_action: wait_for host={{item.public_ip}} port=22 delay=10
timeout=60 state=started
with_items: {{gce.instance_data}}
- name: Configure instance(s)
hosts: launched
sudo: True
roles:
- my_awesome_role
- my_awesome_tasks
- name: Terminate instances
hosts: localhost
connection: local
tasks:
- name: Terminate instances that were previously launched
local_action:
module: gce
state: 'absent'
instance_names: {{gce.instance_names}}
'''
import sys
try:
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
ResourceExistsError, ResourceInUseError, ResourceNotFoundError
_ = Provider.GCE
except ImportError:
print("failed=True " + \
"msg='libcloud with GCE support (0.13.3+) required for this module'")
sys.exit(1)
try:
from ast import literal_eval
except ImportError:
print("failed=True " + \
"msg='GCE module requires python's 'ast' module, python v2.6+'")
sys.exit(1)
def get_instance_info(inst):
"""Retrieves instance information from an instance object and returns it
as a dictionary.
"""
metadata = {}
if 'metadata' in inst.extra and 'items' in inst.extra['metadata']:
for md in inst.extra['metadata']['items']:
metadata[md['key']] = md['value']
try:
netname = inst.extra['networkInterfaces'][0]['network'].split('/')[-1]
except:
netname = None
if 'disks' in inst.extra:
disk_names = [disk_info['source'].split('/')[-1]
for disk_info
in sorted(inst.extra['disks'],
key=lambda disk_info: disk_info['index'])]
else:
disk_names = []
return({
'image': inst.image is not None and inst.image.split('/')[-1] or None,
'disks': disk_names,
'machine_type': inst.size,
'metadata': metadata,
'name': inst.name,
'network': netname,
'private_ip': inst.private_ips[0],
'public_ip': inst.public_ips[0],
'status': ('status' in inst.extra) and inst.extra['status'] or None,
'tags': ('tags' in inst.extra) and inst.extra['tags'] or [],
'zone': ('zone' in inst.extra) and inst.extra['zone'].name or None,
})
def create_instances(module, gce, instance_names):
"""Creates new instances. Attributes other than instance_names are picked
up from 'module'
module : AnsibleModule object
gce: authenticated GCE libcloud driver
instance_names: python list of instance names to create
Returns:
A list of dictionaries with instance information
about the instances that were launched.
"""
image = module.params.get('image')
machine_type = module.params.get('machine_type')
metadata = module.params.get('metadata')
network = module.params.get('network')
persistent_boot_disk = module.params.get('persistent_boot_disk')
disks = module.params.get('disks')
state = module.params.get('state')
tags = module.params.get('tags')
zone = module.params.get('zone')
new_instances = []
changed = False
lc_image = gce.ex_get_image(image)
lc_disks = []
disk_modes = []
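# Per the 'disks' docs above, entries may be plain names or dicts, e.g.
# (illustrative): disks: ['boot-disk', {'name': 'data', 'mode': 'READ_ONLY'}]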
for i, disk in enumerate(disks or []):
if isinstance(disk, dict):
lc_disks.append(gce.ex_get_volume(disk['name']))
disk_modes.append(disk['mode'])
else:
lc_disks.append(gce.ex_get_volume(disk))
# boot disk is implicitly READ_WRITE
disk_modes.append('READ_ONLY' if i > 0 else 'READ_WRITE')
lc_network = gce.ex_get_network(network)
lc_machine_type = gce.ex_get_size(machine_type)
lc_zone = gce.ex_get_zone(zone)
# Try to convert the user's metadata value into the format expected
# by GCE. First try to ensure user has proper quoting of a
# dictionary-like syntax using 'literal_eval', then convert the python
# dict into a python list of 'key' / 'value' dicts. Should end up
# with:
# [ {'key': key1, 'value': value1}, {'key': key2, 'value': value2}, ...]
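# e.g. metadata='{"db":"postgres", "group":"qa"}' becomes
# {'items': [{'key': 'db', 'value': 'postgres'},
#            {'key': 'group', 'value': 'qa'}]}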
if metadata:
try:
md = literal_eval(metadata)
if not isinstance(md, dict):
raise ValueError('metadata must be a dict')
except ValueError, e:
print("failed=True msg='bad metadata: %s'" % str(e))
sys.exit(1)
except SyntaxError, e:
print("failed=True msg='bad metadata syntax'")
sys.exit(1)
items = []
for k,v in md.items():
items.append({"key": k,"value": v})
metadata = {'items': items}
# These variables all have default values but check just in case
if not lc_image or not lc_network or not lc_machine_type or not lc_zone:
module.fail_json(msg='Missing required create instance variable',
changed=False)
for name in instance_names:
pd = None
if lc_disks:
pd = lc_disks[0]
elif persistent_boot_disk:
try:
pd = gce.create_volume(None, "%s" % name, image=lc_image)
except ResourceExistsError:
pd = gce.ex_get_volume("%s" % name, lc_zone)
inst = None
try:
inst = gce.create_node(name, lc_machine_type, lc_image,
location=lc_zone, ex_network=network, ex_tags=tags,
ex_metadata=metadata, ex_boot_disk=pd)
changed = True
except ResourceExistsError:
inst = gce.ex_get_node(name, lc_zone)
except GoogleBaseError, e:
module.fail_json(msg='Unexpected error attempting to create ' + \
'instance %s, error: %s' % (name, e.value))
for i, lc_disk in enumerate(lc_disks):
# Check whether the disk is already attached
if (len(inst.extra['disks']) > i):
attached_disk = inst.extra['disks'][i]
if attached_disk['source'] != lc_disk.extra['selfLink']:
module.fail_json(
msg=("Disk at index %d does not match: requested=%s found=%s" % (
i, lc_disk.extra['selfLink'], attached_disk['source'])))
elif attached_disk['mode'] != disk_modes[i]:
module.fail_json(
msg=("Disk at index %d is in the wrong mode: requested=%s found=%s" % (
i, disk_modes[i], attached_disk['mode'])))
else:
continue
gce.attach_volume(inst, lc_disk, ex_mode=disk_modes[i])
# Work around libcloud bug: attached volumes don't get added
# to the instance metadata. get_instance_info() only cares about
# source and index.
if len(inst.extra['disks']) != i+1:
inst.extra['disks'].append(
{'source': lc_disk.extra['selfLink'], 'index': i})
if inst:
new_instances.append(inst)
instance_names = []
instance_json_data = []
for inst in new_instances:
d = get_instance_info(inst)
instance_names.append(d['name'])
instance_json_data.append(d)
return (changed, instance_json_data, instance_names)
def terminate_instances(module, gce, instance_names, zone_name):
"""Terminates a list of instances.
module: Ansible module object
gce: authenticated GCE connection object
instance_names: a list of instance names to terminate
zone_name: the zone where the instances reside prior to termination
Returns a dictionary of instance names that were terminated.
"""
changed = False
terminated_instance_names = []
for name in instance_names:
inst = None
try:
inst = gce.ex_get_node(name, zone_name)
except ResourceNotFoundError:
pass
except Exception, e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
if inst:
gce.destroy_node(inst)
terminated_instance_names.append(inst.name)
changed = True
return (changed, terminated_instance_names)
def main():
module = AnsibleModule(
argument_spec = dict(
image = dict(default='debian-7'),
instance_names = dict(),
machine_type = dict(default='n1-standard-1'),
metadata = dict(),
name = dict(),
network = dict(default='default'),
persistent_boot_disk = dict(type='bool', default=False),
disks = dict(type='list'),
state = dict(choices=['active', 'present', 'absent', 'deleted'],
default='present'),
tags = dict(type='list'),
zone = dict(default='us-central1-a'),
service_account_email = dict(),
pem_file = dict(),
project_id = dict(),
)
)
gce = gce_connect(module)
image = module.params.get('image')
instance_names = module.params.get('instance_names')
machine_type = module.params.get('machine_type')
metadata = module.params.get('metadata')
name = module.params.get('name')
network = module.params.get('network')
persistent_boot_disk = module.params.get('persistent_boot_disk')
state = module.params.get('state')
tags = module.params.get('tags')
zone = module.params.get('zone')
changed = False
inames = []
if isinstance(instance_names, list):
inames = instance_names
elif isinstance(instance_names, str):
inames = instance_names.split(',')
if name:
inames.append(name)
if not inames:
module.fail_json(msg='Must specify a "name" or "instance_names"',
changed=False)
if not zone:
module.fail_json(msg='Must specify a "zone"', changed=False)
json_output = {'zone': zone}
if state in ['absent', 'deleted']:
json_output['state'] = 'absent'
(changed, terminated_instance_names) = terminate_instances(module,
gce, inames, zone)
# Return the same variable the user specified, although its value may
# differ if some instances could not be destroyed
if instance_names:
json_output['instance_names'] = terminated_instance_names
elif name:
json_output['name'] = name
elif state in ['active', 'present']:
json_output['state'] = 'present'
(changed, instance_data,instance_name_list) = create_instances(
module, gce, inames)
json_output['instance_data'] = instance_data
if instance_names:
json_output['instance_names'] = instance_name_list
elif name:
json_output['name'] = name
json_output['changed'] = changed
print json.dumps(json_output)
sys.exit(0)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.gce import *
main()

@ -1,335 +0,0 @@
#!/usr/bin/python
# Copyright 2013 Google Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: gce_lb
version_added: "1.5"
short_description: create/destroy GCE load-balancer resources
description:
- This module can create and destroy Google Compute Engine C(loadbalancer)
and C(httphealthcheck) resources. The primary LB resource is the
C(load_balancer) resource and the health check parameters are all
prefixed with I(httphealthcheck).
The full documentation for Google Compute Engine load balancing is at
U(https://developers.google.com/compute/docs/load-balancing/). However,
the ansible module simplifies the configuration by following the
libcloud model.
Full install/configuration instructions for the gce* modules can
be found in the comments of ansible/test/gce_tests.py.
options:
httphealthcheck_name:
description:
- the name identifier for the HTTP health check
required: false
default: null
httphealthcheck_port:
description:
- the TCP port to use for HTTP health checking
required: false
default: 80
httphealthcheck_path:
description:
- the url path to use for HTTP health checking
required: false
default: "/"
httphealthcheck_interval:
description:
- the duration in seconds between each health check request
required: false
default: 5
httphealthcheck_timeout:
description:
- the timeout in seconds before a request is considered a failed check
required: false
default: 5
httphealthcheck_unhealthy_count:
description:
- number of consecutive failed checks before marking a node unhealthy
required: false
default: 2
httphealthcheck_healthy_count:
description:
- number of consecutive successful checks before marking a node healthy
required: false
default: 2
httphealthcheck_host:
description:
- host header to pass through on HTTP check requests
required: false
default: null
name:
description:
- name of the load-balancer resource
required: false
default: null
protocol:
description:
- the protocol used for the load-balancer packet forwarding, tcp or udp
required: false
default: "tcp"
choices: ['tcp', 'udp']
region:
description:
- the GCE region where the load-balancer is defined
required: false
external_ip:
description:
- the external static IPv4 (or auto-assigned) address for the LB
required: false
default: null
port_range:
description:
- the port (range) to forward, e.g. 80 or 8000-8888; defaults to all ports
required: false
default: null
members:
description:
- a list of zone/nodename pairs, e.g. ['us-central1-a/www-a', ...]
required: false
aliases: ['nodes']
state:
description:
- desired state of the LB
default: "present"
choices: ["active", "present", "absent", "deleted"]
aliases: []
service_account_email:
version_added: "1.6"
description:
- service account email
required: false
default: null
aliases: []
pem_file:
version_added: "1.6"
description:
- path to the pem file associated with the service account email
required: false
default: null
aliases: []
project_id:
version_added: "1.6"
description:
- your GCE project ID
required: false
default: null
aliases: []
requirements: [ "libcloud" ]
author: Eric Johnson <erjohnso@google.com>
'''
EXAMPLES = '''
# Simple example of creating a new LB, adding members, and a health check
- local_action:
module: gce_lb
name: testlb
region: us-central1
members: ["us-central1-a/www-a", "us-central1-b/www-b"]
httphealthcheck_name: hc
httphealthcheck_port: 80
httphealthcheck_path: "/up"
'''
import sys
try:
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.loadbalancer.types import Provider as Provider_lb
from libcloud.loadbalancer.providers import get_driver as get_driver_lb
from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
ResourceExistsError, ResourceNotFoundError
_ = Provider.GCE
except ImportError:
print("failed=True " + \
"msg='libcloud with GCE support required for this module.'")
sys.exit(1)
def main():
module = AnsibleModule(
argument_spec = dict(
httphealthcheck_name = dict(),
httphealthcheck_port = dict(default=80),
httphealthcheck_path = dict(default='/'),
httphealthcheck_interval = dict(default=5),
httphealthcheck_timeout = dict(default=5),
httphealthcheck_unhealthy_count = dict(default=2),
httphealthcheck_healthy_count = dict(default=2),
httphealthcheck_host = dict(),
name = dict(),
protocol = dict(default='tcp'),
region = dict(),
external_ip = dict(),
port_range = dict(),
members = dict(type='list'),
state = dict(default='present'),
service_account_email = dict(),
pem_file = dict(),
project_id = dict(),
)
)
gce = gce_connect(module)
httphealthcheck_name = module.params.get('httphealthcheck_name')
httphealthcheck_port = module.params.get('httphealthcheck_port')
httphealthcheck_path = module.params.get('httphealthcheck_path')
httphealthcheck_interval = module.params.get('httphealthcheck_interval')
httphealthcheck_timeout = module.params.get('httphealthcheck_timeout')
httphealthcheck_unhealthy_count = \
module.params.get('httphealthcheck_unhealthy_count')
httphealthcheck_healthy_count = \
module.params.get('httphealthcheck_healthy_count')
httphealthcheck_host = module.params.get('httphealthcheck_host')
name = module.params.get('name')
protocol = module.params.get('protocol')
region = module.params.get('region')
external_ip = module.params.get('external_ip')
port_range = module.params.get('port_range')
members = module.params.get('members')
state = module.params.get('state')
try:
gcelb = get_driver_lb(Provider_lb.GCE)(gce_driver=gce)
gcelb.connection.user_agent_append("%s/%s" % (
USER_AGENT_PRODUCT, USER_AGENT_VERSION))
except Exception, e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
changed = False
json_output = {'name': name, 'state': state}
if not name and not httphealthcheck_name:
module.fail_json(msg='Nothing to do, please specify a "name" ' + \
'or "httphealthcheck_name" parameter', changed=False)
if state in ['active', 'present']:
# first, create the httphealthcheck if requested
hc = None
if httphealthcheck_name:
json_output['httphealthcheck_name'] = httphealthcheck_name
try:
hc = gcelb.ex_create_healthcheck(httphealthcheck_name,
host=httphealthcheck_host, path=httphealthcheck_path,
port=httphealthcheck_port,
interval=httphealthcheck_interval,
timeout=httphealthcheck_timeout,
unhealthy_threshold=httphealthcheck_unhealthy_count,
healthy_threshold=httphealthcheck_healthy_count)
changed = True
except ResourceExistsError:
hc = gce.ex_get_healthcheck(httphealthcheck_name)
except Exception, e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
if hc is not None:
json_output['httphealthcheck_host'] = hc.extra['host']
json_output['httphealthcheck_path'] = hc.path
json_output['httphealthcheck_port'] = hc.port
json_output['httphealthcheck_interval'] = hc.interval
json_output['httphealthcheck_timeout'] = hc.timeout
json_output['httphealthcheck_unhealthy_count'] = \
hc.unhealthy_threshold
json_output['httphealthcheck_healthy_count'] = \
hc.healthy_threshold
# create the forwarding rule (and target pool under the hood)
lb = None
if name:
if not region:
module.fail_json(msg='Missing required region name',
changed=False)
nodes = []
output_nodes = []
json_output['name'] = name
# members is a python list of 'zone/inst' strings
if members:
for node in members:
try:
zone, node_name = node.split('/')
nodes.append(gce.ex_get_node(node_name, zone))
output_nodes.append(node)
except:
# skip nodes that are badly formatted or don't exist
pass
try:
if hc is not None:
lb = gcelb.create_balancer(name, port_range, protocol,
None, nodes, ex_region=region, ex_healthchecks=[hc],
ex_address=external_ip)
else:
lb = gcelb.create_balancer(name, port_range, protocol,
None, nodes, ex_region=region, ex_address=external_ip)
changed = True
except ResourceExistsError:
lb = gcelb.get_balancer(name)
except Exception, e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
if lb is not None:
json_output['members'] = output_nodes
json_output['protocol'] = protocol
json_output['region'] = region
json_output['external_ip'] = lb.ip
json_output['port_range'] = lb.port
hc_names = []
if 'healthchecks' in lb.extra:
for hc in lb.extra['healthchecks']:
hc_names.append(hc.name)
json_output['httphealthchecks'] = hc_names
if state in ['absent', 'deleted']:
# first, delete the load balancer (forwarding rule and target pool)
# if specified.
if name:
json_output['name'] = name
try:
lb = gcelb.get_balancer(name)
gcelb.destroy_balancer(lb)
changed = True
except ResourceNotFoundError:
pass
except Exception, e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
# destroy the health check if specified
if httphealthcheck_name:
json_output['httphealthcheck_name'] = httphealthcheck_name
try:
hc = gce.ex_get_healthcheck(httphealthcheck_name)
gce.ex_destroy_healthcheck(hc)
changed = True
except ResourceNotFoundError:
pass
except Exception, e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
json_output['changed'] = changed
print json.dumps(json_output)
sys.exit(0)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.gce import *
main()

@ -1,271 +0,0 @@
#!/usr/bin/python
# Copyright 2013 Google Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: gce_net
version_added: "1.5"
short_description: create/destroy GCE networks and firewall rules
description:
- This module can create and destroy Google Compute Engine networks and
firewall rules U(https://developers.google.com/compute/docs/networking).
The I(name) parameter is reserved for referencing a network while the
I(fwname) parameter is used to reference firewall rules.
IPv4 Address ranges must be specified using the CIDR
U(http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) format.
Full install/configuration instructions for the gce* modules can
be found in the comments of ansible/test/gce_tests.py.
options:
allowed:
description:
- the protocol:ports to allow ('tcp:80' or 'tcp:80,443' or 'tcp:80-800')
required: false
default: null
aliases: []
ipv4_range:
description:
- the IPv4 address range in CIDR notation for the network
required: false
aliases: ['cidr']
fwname:
description:
- name of the firewall rule
required: false
default: null
aliases: ['fwrule']
name:
description:
- name of the network
required: false
default: null
aliases: []
src_range:
description:
- the source IPv4 address range in CIDR notation
required: false
default: null
aliases: ['src_cidr']
src_tags:
description:
- the source instance tags for creating a firewall rule
required: false
default: null
aliases: []
state:
description:
- desired state of the network or firewall rule
required: false
default: "present"
choices: ["active", "present", "absent", "deleted"]
aliases: []
service_account_email:
version_added: "1.6"
description:
- service account email
required: false
default: null
aliases: []
pem_file:
version_added: "1.6"
description:
- path to the pem file associated with the service account email
required: false
default: null
aliases: []
project_id:
version_added: "1.6"
description:
- your GCE project ID
required: false
default: null
aliases: []
requirements: [ "libcloud" ]
author: Eric Johnson <erjohnso@google.com>
'''
EXAMPLES = '''
# Simple example of creating a new network
- local_action:
module: gce_net
name: privatenet
ipv4_range: '10.240.16.0/24'
# Simple example of creating a new firewall rule
- local_action:
module: gce_net
name: privatenet
allowed: tcp:80,8080
src_tags: ["web", "proxy"]
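# Illustrative example (not from the original docs): restrict a firewall
# rule to a source CIDR range with src_range
- local_action:
module: gce_net
name: privatenet
fwname: allow-internal-ssh
allowed: tcp:22
src_range: '10.240.16.0/24'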
'''
import sys
try:
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
ResourceExistsError, ResourceNotFoundError
_ = Provider.GCE
except ImportError:
print("failed=True " + \
"msg='libcloud with GCE support required for this module.'")
sys.exit(1)
def format_allowed(allowed):
"""Format the 'allowed' value so that it is GCE compatible."""
if allowed.count(":") == 0:
# protocol with no ports specified, e.g. 'icmp'
return [{"IPProtocol": allowed}]
elif allowed.count(":") == 1:
protocol, ports = allowed.split(":")
else:
return []
if ports.count(","):
ports = ports.split(",")
else:
ports = [ports]
return [{"IPProtocol": protocol, "ports": ports}]
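# Illustration (not part of the original module):
# format_allowed('tcp:80,443') -> [{'IPProtocol': 'tcp', 'ports': ['80', '443']}]
# format_allowed('icmp') -> [{'IPProtocol': 'icmp'}]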
def main():
module = AnsibleModule(
argument_spec = dict(
allowed = dict(),
ipv4_range = dict(),
fwname = dict(),
name = dict(),
src_range = dict(),
src_tags = dict(type='list'),
state = dict(default='present'),
service_account_email = dict(),
pem_file = dict(),
project_id = dict(),
)
)
gce = gce_connect(module)
allowed = module.params.get('allowed')
ipv4_range = module.params.get('ipv4_range')
fwname = module.params.get('fwname')
name = module.params.get('name')
src_range = module.params.get('src_range')
src_tags = module.params.get('src_tags')
state = module.params.get('state')
changed = False
json_output = {'state': state}
if state in ['active', 'present']:
network = None
try:
network = gce.ex_get_network(name)
json_output['name'] = name
json_output['ipv4_range'] = network.cidr
except ResourceNotFoundError:
pass
except Exception, e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
# user wants to create a new network that doesn't yet exist
if name and not network:
if not ipv4_range:
module.fail_json(msg="Missing required 'ipv4_range' parameter",
changed=False)
try:
network = gce.ex_create_network(name, ipv4_range)
json_output['name'] = name
json_output['ipv4_range'] = ipv4_range
changed = True
except Exception, e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
if fwname:
# user creating a firewall rule
if not allowed and not src_range and not src_tags:
if changed and network:
module.fail_json(
msg="Network created, but missing required " + \
"firewall rule parameter(s)", changed=True)
module.fail_json(
msg="Missing required firewall rule parameter(s)",
changed=False)
allowed_list = format_allowed(allowed)
try:
gce.ex_create_firewall(fwname, allowed_list, network=name,
source_ranges=src_range, source_tags=src_tags)
changed = True
except ResourceExistsError:
pass
except Exception, e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
json_output['fwname'] = fwname
json_output['allowed'] = allowed
json_output['src_range'] = src_range
json_output['src_tags'] = src_tags
if state in ['absent', 'deleted']:
if fwname:
json_output['fwname'] = fwname
fw = None
try:
fw = gce.ex_get_firewall(fwname)
except ResourceNotFoundError:
pass
except Exception, e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
if fw:
gce.ex_destroy_firewall(fw)
changed = True
if name:
json_output['name'] = name
network = None
try:
network = gce.ex_get_network(name)
# json_output['d1'] = 'found network name %s' % name
except ResourceNotFoundError:
# json_output['d2'] = 'not found network name %s' % name
pass
except Exception, e:
# json_output['d3'] = 'error with %s' % name
module.fail_json(msg=unexpected_error_msg(e), changed=False)
if network:
# json_output['d4'] = 'deleting %s' % name
gce.ex_destroy_network(network)
# json_output['d5'] = 'deleted %s' % name
changed = True
json_output['changed'] = changed
print json.dumps(json_output)
sys.exit(0)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.gce import *
main()

@ -1,285 +0,0 @@
#!/usr/bin/python
# Copyright 2013 Google Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: gce_pd
version_added: "1.4"
short_description: utilize GCE persistent disk resources
description:
- This module can create and destroy unformatted GCE persistent disks
U(https://developers.google.com/compute/docs/disks#persistentdisks).
It also supports attaching and detaching disks from running instances.
Full install/configuration instructions for the gce* modules can
be found in the comments of ansible/test/gce_tests.py.
options:
detach_only:
description:
- do not destroy the disk, merely detach it from an instance
required: false
default: "no"
choices: ["yes", "no"]
aliases: []
instance_name:
description:
- instance name if you wish to attach or detach the disk
required: false
default: null
aliases: []
mode:
description:
- GCE mount mode of disk, READ_ONLY (default) or READ_WRITE
required: false
default: "READ_ONLY"
choices: ["READ_WRITE", "READ_ONLY"]
aliases: []
name:
description:
- name of the disk
required: true
default: null
aliases: []
size_gb:
description:
- whole integer size of disk (in GB) to create, default is 10 GB
required: false
default: 10
aliases: []
image:
description:
- the source image to use for the disk
required: false
default: null
aliases: []
version_added: "1.7"
snapshot:
description:
- the source snapshot to use for the disk
required: false
default: null
aliases: []
version_added: "1.7"
state:
description:
- desired state of the persistent disk
required: false
default: "present"
choices: ["active", "present", "absent", "deleted"]
aliases: []
zone:
description:
- zone in which to create the disk
required: false
default: "us-central1-b"
aliases: []
service_account_email:
version_added: "1.6"
description:
- service account email
required: false
default: null
aliases: []
pem_file:
version_added: "1.6"
description:
- path to the pem file associated with the service account email
required: false
default: null
aliases: []
project_id:
version_added: "1.6"
description:
- your GCE project ID
required: false
default: null
aliases: []
requirements: [ "libcloud" ]
author: Eric Johnson <erjohnso@google.com>
'''
EXAMPLES = '''
# Simple attachment action to an existing instance
- local_action:
module: gce_pd
instance_name: notlocalhost
size_gb: 5
name: pd
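# Illustrative example (not from the original docs): create a standalone
# 20 GB disk from a snapshot in a specific zone
- local_action:
module: gce_pd
name: pd-restore
size_gb: 20
snapshot: nightly-backup
zone: us-central1-a
state: present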
'''
import sys
try:
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
ResourceExistsError, ResourceNotFoundError, ResourceInUseError
_ = Provider.GCE
except ImportError:
print("failed=True " + \
"msg='libcloud with GCE support is required for this module.'")
sys.exit(1)
def main():
module = AnsibleModule(
argument_spec = dict(
detach_only = dict(type='bool'),
instance_name = dict(),
mode = dict(default='READ_ONLY', choices=['READ_WRITE', 'READ_ONLY']),
name = dict(required=True),
size_gb = dict(default=10),
image = dict(),
snapshot = dict(),
state = dict(default='present'),
zone = dict(default='us-central1-b'),
service_account_email = dict(),
pem_file = dict(),
project_id = dict(),
)
)
gce = gce_connect(module)
detach_only = module.params.get('detach_only')
instance_name = module.params.get('instance_name')
mode = module.params.get('mode')
name = module.params.get('name')
size_gb = module.params.get('size_gb')
image = module.params.get('image')
snapshot = module.params.get('snapshot')
state = module.params.get('state')
zone = module.params.get('zone')
if detach_only and not instance_name:
module.fail_json(
msg='Must specify an instance name when detaching a disk',
changed=False)
disk = inst = None
changed = is_attached = False
json_output = { 'name': name, 'zone': zone, 'state': state }
if detach_only:
json_output['detach_only'] = True
json_output['detached_from_instance'] = instance_name
if instance_name:
# user wants to attach/detach from an existing instance
try:
inst = gce.ex_get_node(instance_name, zone)
# is the disk attached?
for d in inst.extra['disks']:
if d['deviceName'] == name:
is_attached = True
json_output['attached_mode'] = d['mode']
json_output['attached_to_instance'] = inst.name
except:
pass
# find disk if it already exists
try:
disk = gce.ex_get_volume(name)
json_output['size_gb'] = int(disk.size)
except ResourceNotFoundError:
pass
except Exception, e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
# user wants a disk to exist. If "instance_name" is supplied the user
# also wants it attached
if state in ['active', 'present']:
if not size_gb:
module.fail_json(msg="Must supply a size_gb", changed=False)
try:
size_gb = int(round(float(size_gb)))
if size_gb < 1:
raise Exception
except:
module.fail_json(msg="Must supply a size_gb larger than 1 GB",
changed=False)
if instance_name and inst is None:
module.fail_json(msg='Instance %s does not exist in zone %s' % (
instance_name, zone), changed=False)
if not disk:
if image is not None and snapshot is not None:
module.fail_json(
msg='Cannot give both image (%s) and snapshot (%s)' % (
image, snapshot), changed=False)
lc_image = None
lc_snapshot = None
if image is not None:
lc_image = gce.ex_get_image(image)
elif snapshot is not None:
lc_snapshot = gce.ex_get_snapshot(snapshot)
try:
disk = gce.create_volume(
size_gb, name, location=zone, image=lc_image,
snapshot=lc_snapshot)
except ResourceExistsError:
pass
except QuotaExceededError:
module.fail_json(msg='Requested disk size exceeds quota',
changed=False)
except Exception, e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
json_output['size_gb'] = size_gb
if image is not None:
json_output['image'] = image
if snapshot is not None:
json_output['snapshot'] = snapshot
changed = True
if inst and not is_attached:
try:
gce.attach_volume(inst, disk, device=name, ex_mode=mode)
except Exception, e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
json_output['attached_to_instance'] = inst.name
json_output['attached_mode'] = mode
changed = True
# user wants to delete a disk (or perhaps just detach it).
if state in ['absent', 'deleted'] and disk:
if inst and is_attached:
try:
gce.detach_volume(disk, ex_node=inst)
except Exception, e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
changed = True
if not detach_only:
try:
gce.destroy_volume(disk)
except ResourceInUseError, e:
module.fail_json(msg=str(e.value), changed=False)
except Exception, e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
changed = True
json_output['changed'] = changed
print json.dumps(json_output)
sys.exit(0)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.gce import *
main()

@ -1,260 +0,0 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Benno Joy <benno@ansible.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: glance_image
version_added: "1.2"
short_description: Add/Delete images from glance
description:
- Add or Remove images from the glance repository.
options:
login_username:
description:
- login username to authenticate to keystone
required: true
default: admin
login_password:
description:
- Password of login user
required: true
default: 'yes'
login_tenant_name:
description:
- The tenant name of the login user
required: true
default: 'yes'
auth_url:
description:
- The keystone url for authentication
required: false
default: 'http://127.0.0.1:35357/v2.0/'
region_name:
description:
- Name of the region
required: false
default: None
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
name:
description:
- Name that has to be given to the image
required: true
default: None
disk_format:
description:
- The format of the disk that is getting uploaded
required: false
default: qcow2
container_format:
description:
- The format of the container
required: false
default: bare
owner:
description:
- The owner of the image
required: false
default: None
min_disk:
description:
- The minimum disk space required to deploy this image
required: false
default: None
min_ram:
description:
- The minimum ram required to deploy this image
required: false
default: None
is_public:
description:
- Whether the image can be accessed publicly
required: false
default: 'yes'
copy_from:
description:
- A URL from which the image can be downloaded, mutually exclusive with the file parameter
required: false
default: None
timeout:
description:
- The time to wait for the image process to complete in seconds
required: false
default: 180
file:
description:
- The path to the file which has to be uploaded, mutually exclusive with copy_from
required: false
default: None
endpoint_type:
description:
- The name of the glance service's endpoint URL type
choices: [publicURL, internalURL]
required: false
default: publicURL
version_added: "1.7"
requirements: ["glanceclient", "keystoneclient"]
'''
EXAMPLES = '''
# Upload an image from an HTTP URL
- glance_image: login_username=admin
login_password=passme
login_tenant_name=admin
name=cirros
container_format=bare
disk_format=qcow2
state=present
copy_from=http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-disk.img
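# Illustrative example (not from the original docs): upload the same image
# from a local file instead; file and copy_from are mutually exclusive
- glance_image: login_username=admin
login_password=passme
login_tenant_name=admin
name=cirros-local
container_format=bare
disk_format=qcow2
state=present
file=/tmp/cirros-0.3.0-x86_64-disk.img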
'''
import time
try:
import glanceclient
from keystoneclient.v2_0 import client as ksclient
except ImportError:
print("failed=True msg='glanceclient and keystone client are required'")
def _get_ksclient(module, kwargs):
try:
client = ksclient.Client(username=kwargs.get('login_username'),
password=kwargs.get('login_password'),
tenant_name=kwargs.get('login_tenant_name'),
auth_url=kwargs.get('auth_url'))
except Exception, e:
module.fail_json(msg="Error authenticating to the keystone: %s " % e.message)
return client
def _get_endpoint(module, client, endpoint_type):
try:
endpoint = client.service_catalog.url_for(service_type='image', endpoint_type=endpoint_type)
except Exception, e:
module.fail_json(msg="Error getting endpoint for glance: %s" % e.message)
return endpoint
def _get_glance_client(module, kwargs):
_ksclient = _get_ksclient(module, kwargs)
token = _ksclient.auth_token
endpoint = _get_endpoint(module, _ksclient, kwargs.get('endpoint_type'))
kwargs = {
'token': token,
}
try:
client = glanceclient.Client('1', endpoint, **kwargs)
except Exception, e:
module.fail_json(msg="Error in connecting to glance: %s" % e.message)
return client
def _glance_image_present(module, params, client):
try:
for image in client.images.list():
if image.name == params['name']:
return image.id
return None
except Exception, e:
module.fail_json(msg="Error in fetching image list: %s" % e.message)
def _glance_image_create(module, params, client):
kwargs = {
'name': params.get('name'),
'disk_format': params.get('disk_format'),
'container_format': params.get('container_format'),
'owner': params.get('owner'),
'is_public': params.get('is_public'),
'copy_from': params.get('copy_from'),
}
try:
timeout = float(params.get('timeout'))
expire = time.time() + timeout
image = client.images.create(**kwargs)
if not params['copy_from']:
image.update(data=open(params['file'], 'rb'))
while time.time() < expire:
image = client.images.get(image.id)
if image.status == 'active':
break
time.sleep(5)
except Exception, e:
module.fail_json(msg="Error in creating image: %s" % e.message)
if image.status == 'active':
module.exit_json(changed=True, result=image.status, id=image.id)
else:
module.fail_json(msg=" The module timed out, please check manually " + image.status)
def _glance_delete_image(module, params, client):
try:
for image in client.images.list():
if image.name == params['name']:
client.images.delete(image)
except Exception, e:
module.fail_json(msg="Error in deleting image: %s" % e.message)
module.exit_json(changed=True, result="Deleted")
def main():
argument_spec = openstack_argument_spec()
argument_spec.update(dict(
name = dict(required=True),
disk_format = dict(default='qcow2', choices=['aki', 'vhd', 'vmdk', 'raw', 'qcow2', 'vdi', 'iso']),
container_format = dict(default='bare', choices=['aki', 'ari', 'bare', 'ovf']),
owner = dict(default=None),
min_disk = dict(default=None),
min_ram = dict(default=None),
is_public = dict(default=True),
copy_from = dict(default= None),
timeout = dict(default=180),
file = dict(default=None),
endpoint_type = dict(default='publicURL', choices=['publicURL', 'internalURL']),
state = dict(default='present', choices=['absent', 'present'])
))
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive = [['file','copy_from']],
)
if module.params['state'] == 'present':
if not module.params['file'] and not module.params['copy_from']:
module.fail_json(msg="Either file or copy_from variable should be set to create the image")
client = _get_glance_client(module, module.params)
id = _glance_image_present(module, module.params, client)
if not id:
_glance_image_create(module, module.params, client)
module.exit_json(changed=False, id=id, result="success")
if module.params['state'] == 'absent':
client = _get_glance_client(module, module.params)
id = _glance_image_present(module, module.params, client)
if not id:
module.exit_json(changed=False, result="Success")
else:
_glance_delete_image(module, module.params, client)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
main()

@ -1,394 +0,0 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Based on Jimmy Tang's implementation
DOCUMENTATION = '''
---
module: keystone_user
version_added: "1.2"
short_description: Manage OpenStack Identity (keystone) users, tenants and roles
description:
- Manage users, tenants, and roles in OpenStack.
options:
login_user:
description:
- login username to authenticate to keystone
required: false
default: admin
login_password:
description:
- Password of login user
required: false
default: 'yes'
login_tenant_name:
description:
- The tenant login_user belongs to
required: false
default: None
version_added: "1.3"
token:
description:
- The token to be used in case the password is not specified
required: false
default: None
endpoint:
description:
- The keystone url for authentication
required: false
default: 'http://127.0.0.1:35357/v2.0/'
user:
description:
- The name of the user that has to be added/removed from OpenStack
required: false
default: None
password:
description:
- The password to be assigned to the user
required: false
default: None
tenant:
description:
- The tenant name that has to be added/removed
required: false
default: None
tenant_description:
description:
- A description for the tenant
required: false
default: None
email:
description:
- An email address for the user
required: false
default: None
role:
description:
- The name of the role to be assigned or created
required: false
default: None
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
requirements: [ python-keystoneclient ]
author: Lorin Hochstein
'''
EXAMPLES = '''
# Create a tenant
- keystone_user: tenant=demo tenant_description="Default Tenant"
# Create a user
- keystone_user: user=john tenant=demo password=secrete
# Apply the admin role to the john user in the demo tenant
- keystone_user: role=admin user=john tenant=demo
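# Illustrative example (not from the original docs): authenticate with an
# admin token and endpoint instead of a login user
- keystone_user: token=ADMIN endpoint=http://127.0.0.1:35357/v2.0 tenant=demo tenant_description="Default Tenant"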
'''
try:
from keystoneclient.v2_0 import client
except ImportError:
keystoneclient_found = False
else:
keystoneclient_found = True
def authenticate(endpoint, token, login_user, login_password, login_tenant_name):
"""Return a keystone client object"""
if token:
return client.Client(endpoint=endpoint, token=token)
else:
return client.Client(auth_url=endpoint, username=login_user,
password=login_password, tenant_name=login_tenant_name)
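# Illustration (assumed values): authenticate('http://127.0.0.1:35357/v2.0',
# token='ADMIN', login_user=None, login_password=None, login_tenant_name=None)
# returns a token-scoped client; with token=None it falls back to
# username/password authentication against the endpoint as auth_url.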
def tenant_exists(keystone, tenant):
""" Return True if tenant already exists"""
return tenant in [x.name for x in keystone.tenants.list()]
def user_exists(keystone, user):
"""" Return True if user already exists"""
return user in [x.name for x in keystone.users.list()]
def get_tenant(keystone, name):
""" Retrieve a tenant by name"""
tenants = [x for x in keystone.tenants.list() if x.name == name]
count = len(tenants)
if count == 0:
raise KeyError("No keystone tenants with name %s" % name)
elif count > 1:
raise ValueError("%d tenants with name %s" % (count, name))
else:
return tenants[0]
def get_user(keystone, name):
""" Retrieve a user by name"""
users = [x for x in keystone.users.list() if x.name == name]
count = len(users)
if count == 0:
raise KeyError("No keystone users with name %s" % name)
elif count > 1:
raise ValueError("%d users with name %s" % (count, name))
else:
return users[0]
def get_role(keystone, name):
""" Retrieve a role by name"""
roles = [x for x in keystone.roles.list() if x.name == name]
count = len(roles)
if count == 0:
raise KeyError("No keystone roles with name %s" % name)
elif count > 1:
raise ValueError("%d roles with name %s" % (count, name))
else:
return roles[0]
def get_tenant_id(keystone, name):
return get_tenant(keystone, name).id
def get_user_id(keystone, name):
return get_user(keystone, name).id
def ensure_tenant_exists(keystone, tenant_name, tenant_description,
check_mode):
""" Ensure that a tenant exists.
Return (True, id) if a new tenant was created, (False, None) if it
already existed.
"""
# Check if tenant already exists
try:
tenant = get_tenant(keystone, tenant_name)
except KeyError:
# Tenant doesn't exist yet
pass
else:
if tenant.description == tenant_description:
return (False, tenant.id)
else:
# We need to update the tenant description
if check_mode:
return (True, tenant.id)
else:
tenant.update(description=tenant_description)
return (True, tenant.id)
# We now know we will have to create a new tenant
if check_mode:
return (True, None)
ks_tenant = keystone.tenants.create(tenant_name=tenant_name,
description=tenant_description,
enabled=True)
return (True, ks_tenant.id)
def ensure_tenant_absent(keystone, tenant, check_mode):
""" Ensure that a tenant does not exist
Return True if the tenant was removed, False if it didn't exist
in the first place
"""
if not tenant_exists(keystone, tenant):
return False
# We now know we will have to delete the tenant
if check_mode:
return True
keystone.tenants.delete(get_tenant(keystone, tenant))
return True
def ensure_user_exists(keystone, user_name, password, email, tenant_name,
check_mode):
""" Check if user exists
Return (True, id) if a new user was created, (False, id) if the user
already exists.
"""
# Check if the user already exists
try:
user = get_user(keystone, user_name)
except KeyError:
# User doesn't exist yet
pass
else:
# User does exist, we're done
return (False, user.id)
# We now know we will have to create a new user
if check_mode:
return (True, None)
tenant = get_tenant(keystone, tenant_name)
user = keystone.users.create(name=user_name, password=password,
email=email, tenant_id=tenant.id)
return (True, user.id)
def ensure_role_exists(keystone, user_name, tenant_name, role_name,
check_mode):
""" Check if role exists
Return (True, id) if a new role was created or if the role was newly
assigned to the user for the tenant. (False, id) if the role already
exists and was already assigned to the user for the tenant.
"""
# Check if the user has the role in the tenant
user = get_user(keystone, user_name)
tenant = get_tenant(keystone, tenant_name)
roles = [x for x in keystone.roles.roles_for_user(user, tenant)
if x.name == role_name]
count = len(roles)
if count == 1:
# If the role is in there, we are done
role = roles[0]
return (False, role.id)
elif count > 1:
# Too many roles with the same name, throw an error
raise ValueError("%d roles with name %s" % (count, role_name))
# At this point, we know we will need to make changes
if check_mode:
return (True, None)
# Get the role if it exists
try:
role = get_role(keystone, role_name)
except KeyError:
# Role doesn't exist yet
role = keystone.roles.create(role_name)
# Associate the role with the user for the given tenant
keystone.roles.add_user_role(user, role, tenant)
return (True, role.id)
def ensure_user_absent(keystone, user, check_mode):
raise NotImplementedError("Not yet implemented")
def ensure_role_absent(keystone, user, tenant, role, check_mode):
raise NotImplementedError("Not yet implemented")
def main():
argument_spec = openstack_argument_spec()
argument_spec.update(dict(
tenant_description=dict(required=False),
email=dict(required=False),
role=dict(required=False),
state=dict(default='present', choices=['present', 'absent']),
endpoint=dict(required=False,
default="http://127.0.0.1:35357/v2.0"),
token=dict(required=False),
login_user=dict(required=False),
login_password=dict(required=False),
login_tenant_name=dict(required=False)
))
# keystone operations themselves take an endpoint, not a keystone auth_url
del(argument_spec['auth_url'])
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=[['token', 'login_user'],
['token', 'login_password'],
['token', 'login_tenant_name']]
)
if not keystoneclient_found:
module.fail_json(msg="the python-keystoneclient module is required")
user = module.params['user']
password = module.params['password']
tenant = module.params['tenant']
tenant_description = module.params['tenant_description']
email = module.params['email']
role = module.params['role']
state = module.params['state']
endpoint = module.params['endpoint']
token = module.params['token']
login_user = module.params['login_user']
login_password = module.params['login_password']
login_tenant_name = module.params['login_tenant_name']
keystone = authenticate(endpoint, token, login_user, login_password, login_tenant_name)
check_mode = module.check_mode
try:
d = dispatch(keystone, user, password, tenant, tenant_description,
email, role, state, endpoint, token, login_user,
login_password, check_mode)
except Exception, e:
if check_mode:
# If we have a failure in check mode
module.exit_json(changed=True,
msg="exception: %s" % e)
else:
module.fail_json(msg="exception: %s" % e)
else:
module.exit_json(**d)
def dispatch(keystone, user=None, password=None, tenant=None,
tenant_description=None, email=None, role=None,
state="present", endpoint=None, token=None, login_user=None,
login_password=None, check_mode=False):
""" Dispatch to the appropriate method.
Returns a dict that will be passed to exit_json
tenant  user  role  state
------  ----  ----  -------
X                   present   ensure_tenant_exists
X                   absent    ensure_tenant_absent
X       X           present   ensure_user_exists
X       X           absent    ensure_user_absent
X       X     X     present   ensure_role_exists
X       X     X     absent    ensure_role_absent
"""
changed = False
id = None
if tenant and not user and not role and state == "present":
changed, id = ensure_tenant_exists(keystone, tenant,
tenant_description, check_mode)
elif tenant and not user and not role and state == "absent":
changed = ensure_tenant_absent(keystone, tenant, check_mode)
elif tenant and user and not role and state == "present":
changed, id = ensure_user_exists(keystone, user, password,
email, tenant, check_mode)
elif tenant and user and not role and state == "absent":
changed = ensure_user_absent(keystone, user, check_mode)
elif tenant and user and role and state == "present":
changed, id = ensure_role_exists(keystone, user, tenant, role,
check_mode)
elif tenant and user and role and state == "absent":
changed = ensure_role_absent(keystone, user, tenant, role, check_mode)
else:
# Should never reach here
raise ValueError("Code should never reach here")
return dict(changed=changed, id=id)
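# Illustration (assumed values): dispatch(keystone, tenant='demo',
# tenant_description='Default Tenant', state='present') routes to
# ensure_tenant_exists and returns {'changed': True, 'id': '<tenant id>'}.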
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()

@ -1,493 +0,0 @@
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: linode
short_description: create / delete / stop / restart an instance in Linode Public Cloud
description:
- creates / deletes a Linode Public Cloud instance and optionally waits for it to be 'running'.
version_added: "1.3"
options:
state:
description:
- Indicate desired state of the resource
choices: ['present', 'active', 'started', 'absent', 'deleted', 'stopped', 'restarted']
default: present
api_key:
description:
- Linode API key
default: null
name:
description:
- Name to give the instance (alphanumeric, dashes, underscore)
- To keep sanity on the Linode Web Console, name is prepended with LinodeID_
default: null
type: string
linode_id:
description:
- Unique ID of a linode server
aliases: ['lid']
default: null
type: integer
plan:
description:
- plan to use for the instance (Linode plan)
default: null
type: integer
payment_term:
description:
- payment term to use for the instance (payment term in months)
default: 1
type: integer
choices: [1, 12, 24]
password:
description:
- root password to apply to a new server (auto generated if missing)
default: null
type: string
ssh_pub_key:
description:
- SSH public key applied to root user
default: null
type: string
swap:
description:
- swap size in MB
default: 512
type: integer
distribution:
description:
- distribution to use for the instance (Linode Distribution)
default: null
type: integer
datacenter:
description:
- datacenter to create an instance in (Linode Datacenter)
default: null
type: integer
wait:
description:
- wait for the instance to be in state 'running' before returning
default: "no"
choices: [ "yes", "no" ]
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 300
requirements: [ "linode-python", "pycurl" ]
author: Vincent Viallet
notes:
- LINODE_API_KEY env variable can be used instead
'''
EXAMPLES = '''
# Create a server
- local_action:
module: linode
api_key: 'longStringFromLinodeApi'
name: linode-test1
plan: 1
datacenter: 2
distribution: 99
password: 'superSecureRootPassword'
ssh_pub_key: 'ssh-rsa qwerty'
swap: 768
wait: yes
wait_timeout: 600
state: present
# Ensure a running server (create if missing)
- local_action:
module: linode
api_key: 'longStringFromLinodeApi'
name: linode-test1
linode_id: 12345678
plan: 1
datacenter: 2
distribution: 99
password: 'superSecureRootPassword'
ssh_pub_key: 'ssh-rsa qwerty'
swap: 768
wait: yes
wait_timeout: 600
state: present
# Delete a server
- local_action:
module: linode
api_key: 'longStringFromLinodeApi'
name: linode-test1
linode_id: 12345678
state: absent
# Stop a server
- local_action:
module: linode
api_key: 'longStringFromLinodeApi'
name: linode-test1
linode_id: 12345678
state: stopped
# Reboot a server
- local_action:
module: linode
api_key: 'longStringFromLinodeApi'
name: linode-test1
linode_id: 12345678
state: restarted
'''
import sys
import time
import os
try:
import pycurl
except ImportError:
print("failed=True msg='pycurl required for this module'")
sys.exit(1)
try:
from linode import api as linode_api
except ImportError:
print("failed=True msg='linode-python required for this module'")
sys.exit(1)
def randompass():
'''
Generate a long random password that complies with Linode requirements
'''
# Linode API currently requires the following:
# It must contain at least two of these four character classes:
# lower case letters - upper case letters - numbers - punctuation
# we play it safe :)
import random
import string
# as of python 2.4, this reseeds the PRNG from urandom
random.seed()
lower = ''.join(random.choice(string.ascii_lowercase) for x in range(6))
upper = ''.join(random.choice(string.ascii_uppercase) for x in range(6))
number = ''.join(random.choice(string.digits) for x in range(6))
punct = ''.join(random.choice(string.punctuation) for x in range(6))
p = lower + upper + number + punct
return ''.join(random.sample(p, len(p)))
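# Illustration: randompass() returns a shuffled 24-character string built
# from six characters of each class, which satisfies Linode's requirement
# of at least two character classes.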
def getInstanceDetails(api, server):
'''
Return the details of an instance, populating IPs, etc.
'''
instance = {'id': server['LINODEID'],
'name': server['LABEL'],
'public': [],
'private': []}
# Populate with ips
for ip in api.linode_ip_list(LinodeId=server['LINODEID']):
if ip['ISPUBLIC'] and 'ipv4' not in instance:
instance['ipv4'] = ip['IPADDRESS']
instance['fqdn'] = ip['RDNS_NAME']
if ip['ISPUBLIC']:
instance['public'].append({'ipv4': ip['IPADDRESS'],
'fqdn': ip['RDNS_NAME'],
'ip_id': ip['IPADDRESSID']})
else:
instance['private'].append({'ipv4': ip['IPADDRESS'],
'fqdn': ip['RDNS_NAME'],
'ip_id': ip['IPADDRESSID']})
return instance
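# Illustration of the returned structure (values assumed):
# {'id': 12345, 'name': '12345_linode-test1',
#  'ipv4': '203.0.113.10', 'fqdn': 'li123.members.linode.com',
#  'public': [{'ipv4': '203.0.113.10', 'fqdn': 'li123.members.linode.com', 'ip_id': 111}],
#  'private': []}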
def linodeServers(module, api, state, name, plan, distribution, datacenter, linode_id,
payment_term, password, ssh_pub_key, swap, wait, wait_timeout):
instances = []
changed = False
new_server = False
servers = []
disks = []
configs = []
jobs = []
# See if we can match an existing server details with the provided linode_id
if linode_id:
# For the moment we only consider linode_id as criteria for match
# Later we can use more (size, name, etc.) and update existing
servers = api.linode_list(LinodeId=linode_id)
# Attempt to fetch details about disks and configs only if servers are
# found with linode_id
if servers:
disks = api.linode_disk_list(LinodeId=linode_id)
configs = api.linode_config_list(LinodeId=linode_id)
# Act on the state
if state in ('active', 'present', 'started'):
# TODO: validate all the plan / distribution / datacenter are valid
# Multi step process/validation:
# - need linode_id (entity)
# - need disk_id for linode_id - create disk from distrib
# - need config_id for linode_id - create config (need kernel)
# Any create step triggers a job that need to be waited for.
if not servers:
for arg in ('name', 'plan', 'distribution', 'datacenter'):
if not eval(arg):
module.fail_json(msg='%s is required for active state' % arg)
# Create linode entity
new_server = True
try:
res = api.linode_create(DatacenterID=datacenter, PlanID=plan,
PaymentTerm=payment_term)
linode_id = res['LinodeID']
# Update linode Label to match name
api.linode_update(LinodeId=linode_id, Label='%s_%s' % (linode_id, name))
# Save server
servers = api.linode_list(LinodeId=linode_id)
except Exception, e:
module.fail_json(msg = '%s' % e.value[0]['ERRORMESSAGE'])
if not disks:
for arg in ('name', 'linode_id', 'distribution'):
if not eval(arg):
module.fail_json(msg='%s is required for active state' % arg)
# Create disks (1 from distrib, 1 for SWAP)
new_server = True
try:
if not password:
# Password is required on creation, if not provided generate one
password = randompass()
if not swap:
swap = 512
# Create data disk
size = servers[0]['TOTALHD'] - swap
if ssh_pub_key:
res = api.linode_disk_createfromdistribution(
LinodeId=linode_id, DistributionID=distribution,
rootPass=password, rootSSHKey=ssh_pub_key,
Label='%s data disk (lid: %s)' % (name, linode_id), Size=size)
else:
res = api.linode_disk_createfromdistribution(
LinodeId=linode_id, DistributionID=distribution, rootPass=password,
Label='%s data disk (lid: %s)' % (name, linode_id), Size=size)
jobs.append(res['JobID'])
# Create SWAP disk
res = api.linode_disk_create(LinodeId=linode_id, Type='swap',
Label='%s swap disk (lid: %s)' % (name, linode_id),
Size=swap)
jobs.append(res['JobID'])
except Exception, e:
# TODO: destroy linode ?
module.fail_json(msg = '%s' % e.value[0]['ERRORMESSAGE'])
if not configs:
for arg in ('name', 'linode_id', 'distribution'):
if not eval(arg):
module.fail_json(msg='%s is required for active state' % arg)
# Check architecture
for distrib in api.avail_distributions():
if distrib['DISTRIBUTIONID'] != distribution:
continue
arch = '32'
if distrib['IS64BIT']:
arch = '64'
break
# Get latest kernel matching arch
for kernel in api.avail_kernels():
if not kernel['LABEL'].startswith('Latest %s' % arch):
continue
kernel_id = kernel['KERNELID']
break
# Get disk list
disks_id = []
for disk in api.linode_disk_list(LinodeId=linode_id):
if disk['TYPE'] == 'ext3':
disks_id.insert(0, str(disk['DISKID']))
continue
disks_id.append(str(disk['DISKID']))
# The Linode config API expects a DiskList of nine slots; pad with empty strings
while len(disks_id) < 9:
disks_id.append('')
disks_list = ','.join(disks_id)
# Create config
new_server = True
try:
api.linode_config_create(LinodeId=linode_id, KernelId=kernel_id,
Disklist=disks_list, Label='%s config' % name)
configs = api.linode_config_list(LinodeId=linode_id)
except Exception, e:
module.fail_json(msg = '%s' % e.value[0]['ERRORMESSAGE'])
# Start / Ensure servers are running
for server in servers:
# Refresh server state
server = api.linode_list(LinodeId=server['LINODEID'])[0]
# Ensure existing servers are up and running, boot if necessary
if server['STATUS'] != 1:
res = api.linode_boot(LinodeId=linode_id)
jobs.append(res['JobID'])
changed = True
# wait here until the instances are up
wait_timeout = time.time() + wait_timeout
while wait and wait_timeout > time.time():
# refresh the server details
server = api.linode_list(LinodeId=server['LINODEID'])[0]
# status:
# -2: Boot failed
# 1: Running
if server['STATUS'] in (-2, 1):
break
time.sleep(5)
if wait and wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg = 'Timeout waiting on %s (lid: %s)' %
(server['LABEL'], server['LINODEID']))
# Get a fresh copy of the server details
server = api.linode_list(LinodeId=server['LINODEID'])[0]
if server['STATUS'] == -2:
module.fail_json(msg = '%s (lid: %s) failed to boot' %
(server['LABEL'], server['LINODEID']))
# From now on we know the task is a success
# Build instance report
instance = getInstanceDetails(api, server)
# depending on wait flag select the status
if wait:
instance['status'] = 'Running'
else:
instance['status'] = 'Starting'
# Return the root password if this is a new box and no SSH key
# has been provided
if new_server and not ssh_pub_key:
instance['password'] = password
instances.append(instance)
elif state in ('stopped',):
for arg in ('name', 'linode_id'):
if not eval(arg):
module.fail_json(msg='%s is required for stopped state' % arg)
if not servers:
module.fail_json(msg = 'Server %s (lid: %s) not found' % (name, linode_id))
for server in servers:
instance = getInstanceDetails(api, server)
if server['STATUS'] != 2:
try:
res = api.linode_shutdown(LinodeId=linode_id)
except Exception, e:
module.fail_json(msg = '%s' % e.value[0]['ERRORMESSAGE'])
instance['status'] = 'Stopping'
changed = True
else:
instance['status'] = 'Stopped'
instances.append(instance)
elif state in ('restarted',):
for arg in ('name', 'linode_id'):
if not eval(arg):
module.fail_json(msg='%s is required for restarted state' % arg)
if not servers:
module.fail_json(msg = 'Server %s (lid: %s) not found' % (name, linode_id))
for server in servers:
instance = getInstanceDetails(api, server)
try:
res = api.linode_reboot(LinodeId=server['LINODEID'])
except Exception, e:
module.fail_json(msg = '%s' % e.value[0]['ERRORMESSAGE'])
instance['status'] = 'Restarting'
changed = True
instances.append(instance)
elif state in ('absent', 'deleted'):
for server in servers:
instance = getInstanceDetails(api, server)
try:
api.linode_delete(LinodeId=server['LINODEID'], skipChecks=True)
except Exception, e:
module.fail_json(msg = '%s' % e.value[0]['ERRORMESSAGE'])
instance['status'] = 'Deleting'
changed = True
instances.append(instance)
# Ease parsing if only 1 instance
if len(instances) == 1:
module.exit_json(changed=changed, instance=instances[0])
module.exit_json(changed=changed, instances=instances)
def main():
module = AnsibleModule(
argument_spec = dict(
state = dict(default='present', choices=['active', 'present', 'started',
'deleted', 'absent', 'stopped',
'restarted']),
api_key = dict(),
name = dict(type='str'),
plan = dict(type='int'),
distribution = dict(type='int'),
datacenter = dict(type='int'),
linode_id = dict(type='int', aliases=['lid']),
payment_term = dict(type='int', default=1, choices=[1, 12, 24]),
password = dict(type='str'),
ssh_pub_key = dict(type='str'),
swap = dict(type='int', default=512),
wait = dict(type='bool', default=True),
wait_timeout = dict(default=300),
)
)
state = module.params.get('state')
api_key = module.params.get('api_key')
name = module.params.get('name')
plan = module.params.get('plan')
distribution = module.params.get('distribution')
datacenter = module.params.get('datacenter')
linode_id = module.params.get('linode_id')
payment_term = module.params.get('payment_term')
password = module.params.get('password')
ssh_pub_key = module.params.get('ssh_pub_key')
swap = module.params.get('swap')
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
# Setup the api_key
if not api_key:
try:
api_key = os.environ['LINODE_API_KEY']
except KeyError, e:
module.fail_json(msg = 'Unable to load %s' % e.message)
# setup the auth
try:
api = linode_api.Api(api_key)
api.test_echo()
except Exception, e:
module.fail_json(msg = '%s' % e.value[0]['ERRORMESSAGE'])
linodeServers(module, api, state, name, plan, distribution, datacenter, linode_id,
payment_term, password, ssh_pub_key, swap, wait, wait_timeout)
# import module snippets
from ansible.module_utils.basic import *
main()

@ -1,585 +0,0 @@
#!/usr/bin/python
#coding: utf-8 -*-
# (c) 2013, Benno Joy <benno@ansible.com>
# (c) 2013, John Dewey <john@dewey.ws>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
import operator
import os
try:
from novaclient.v1_1 import client as nova_client
from novaclient.v1_1 import floating_ips
from novaclient import exceptions
from novaclient import utils
import time
except ImportError:
print("failed=True msg='novaclient is required for this module'")
DOCUMENTATION = '''
---
module: nova_compute
version_added: "1.2"
short_description: Create/Delete VMs from OpenStack
description:
- Create or remove virtual machines from OpenStack.
options:
login_username:
description:
- login username to authenticate to keystone
required: true
default: admin
login_password:
description:
- Password of login user
required: true
default: 'yes'
login_tenant_name:
description:
- The tenant name of the login user
required: true
default: 'yes'
auth_url:
description:
- The keystone url for authentication
required: false
default: 'http://127.0.0.1:35357/v2.0/'
region_name:
description:
- Name of the region
required: false
default: None
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
name:
description:
- Name that has to be given to the instance
required: true
default: None
image_id:
description:
- The id of the base image to boot. Mutually exclusive with image_name
required: true
default: None
image_name:
description:
- The name of the base image to boot. Mutually exclusive with image_id
required: true
default: None
version_added: "1.8"
image_exclude:
description:
- Text used to filter image names in cases, such as HP, where multiple image names match the common identifying portion. image_exclude is a negative match filter - the given text must not appear in the image name. Defaults to "(deprecated)"
version_added: "1.8"
flavor_id:
description:
- The id of the flavor in which the new VM has to be created. Mutually exclusive with flavor_ram
required: false
default: 1
flavor_ram:
description:
- The minimum amount of ram in MB that the flavor in which the new VM has to be created must have. Mutually exclusive with flavor_id
required: false
default: 1
version_added: "1.8"
flavor_include:
description:
- Text used to filter flavor names in cases, such as Rackspace, where multiple flavors have the same RAM count. flavor_include is a positive match filter - the given text must appear in the flavor name.
version_added: "1.8"
key_name:
description:
- The key pair name to be used when creating a VM
required: false
default: None
security_groups:
description:
- The name of the security group to which the VM should be added
required: false
default: None
nics:
description:
- A list of network IDs to which the VM's interface should be attached
required: false
default: None
auto_floating_ip:
description:
- Should a floating ip be auto created and assigned
required: false
default: 'yes'
version_added: "1.8"
floating_ips:
description:
- list of valid floating IPs that pre-exist to assign to this node
required: false
default: None
version_added: "1.8"
floating_ip_pools:
description:
- list of floating IP pools from which to choose a floating IP
required: false
default: None
version_added: "1.8"
availability_zone:
description:
- Name of the availability zone
required: false
default: None
version_added: "1.8"
meta:
description:
- A list of key value pairs that should be provided as metadata to the new VM
required: false
default: None
wait:
description:
- If the module should wait for the VM to be created.
required: false
default: 'yes'
wait_for:
description:
- The amount of time the module should wait for the VM to get into active state
required: false
default: 180
config_drive:
description:
- Whether to boot the server with config drive enabled
required: false
default: 'no'
version_added: "1.8"
user_data:
description:
- Opaque blob of data which is made available to the instance
required: false
default: None
version_added: "1.6"
requirements: ["novaclient"]
'''
EXAMPLES = '''
# Creates a new VM and attaches to a network and passes metadata to the instance
- nova_compute:
state: present
login_username: admin
login_password: admin
login_tenant_name: admin
name: vm1
image_id: 4f905f38-e52a-43d2-b6ec-754a13ffb529
key_name: ansible_key
wait_for: 200
flavor_id: 4
nics:
- net-id: 34605f38-e52a-25d2-b6ec-754a13ffb723
meta:
hostname: test1
group: uge_master
# Creates a new VM in HP Cloud AE1 region availability zone az2 and automatically assigns a floating IP
- name: launch a nova instance
hosts: localhost
tasks:
- name: launch an instance
nova_compute:
state: present
login_username: username
login_password: Equality7-2521
login_tenant_name: username-project1
name: vm1
auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
region_name: region-b.geo-1
availability_zone: az2
image_id: 9302692b-b787-4b52-a3a6-daebb79cb498
key_name: test
wait_for: 200
flavor_id: 101
security_groups: default
auto_floating_ip: yes
# Creates a new VM in HP Cloud AE1 region availability zone az2 and assigns a pre-known floating IP
- name: launch a nova instance
hosts: localhost
tasks:
- name: launch an instance
nova_compute:
state: present
login_username: username
login_password: Equality7-2521
login_tenant_name: username-project1
name: vm1
auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
region_name: region-b.geo-1
availability_zone: az2
image_id: 9302692b-b787-4b52-a3a6-daebb79cb498
key_name: test
wait_for: 200
flavor_id: 101
floating_ips:
- 12.34.56.79
# Creates a new VM with 4G of RAM on Ubuntu Trusty, ignoring deprecated images
- name: launch a nova instance
hosts: localhost
tasks:
- name: launch an instance
nova_compute:
name: vm1
state: present
login_username: username
login_password: Equality7-2521
login_tenant_name: username-project1
auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
region_name: region-b.geo-1
image_name: Ubuntu Server 14.04
image_exclude: deprecated
flavor_ram: 4096
# Creates a new VM with 4G of RAM on Ubuntu Trusty on a Rackspace Performance node in DFW
- name: launch a nova instance
hosts: localhost
tasks:
- name: launch an instance
nova_compute:
name: vm1
state: present
login_username: username
login_password: Equality7-2521
login_tenant_name: username-project1
auth_url: https://identity.api.rackspacecloud.com/v2.0/
region_name: DFW
image_name: Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM)
flavor_ram: 4096
flavor_include: Performance
'''
def _delete_server(module, nova):
name = None
server_list = None
try:
server_list = nova.servers.list(True, {'name': module.params['name']})
if server_list:
server = [x for x in server_list if x.name == module.params['name']]
nova.servers.delete(server.pop())
except Exception, e:
module.fail_json( msg = "Error in deleting vm: %s" % e.message)
if module.params['wait'] == 'no':
module.exit_json(changed = True, result = "deleted")
expire = time.time() + int(module.params['wait_for'])
while time.time() < expire:
name = nova.servers.list(True, {'name': module.params['name']})
if not name:
module.exit_json(changed = True, result = "deleted")
time.sleep(5)
module.fail_json(msg = "Timed out waiting for server to get deleted, please check manually")
def _add_floating_ip_from_pool(module, nova, server):
# instantiate FloatingIPManager object
floating_ip_obj = floating_ips.FloatingIPManager(nova)
# empty dict and list
usable_floating_ips = {}
pools = []
# user specified
pools = module.params['floating_ip_pools']
# get the list of all floating IPs. Mileage may
# vary according to Nova Compute configuration
# per cloud provider
all_floating_ips = floating_ip_obj.list()
# iterate through all pools of IP address. Empty
# string means all and is the default value
for pool in pools:
# temporary list per pool
pool_ips = []
# loop through all floating IPs
for f_ip in all_floating_ips:
# if not reserved and the correct pool, add
if f_ip.instance_id is None and (f_ip.pool == pool):
pool_ips.append(f_ip.ip)
# only need one
break
# if the list is empty, add for this pool
if not pool_ips:
try:
new_ip = nova.floating_ips.create(pool)
except Exception, e:
module.fail_json(msg = "Unable to create floating ip")
pool_ips.append(new_ip.ip)
# Add to the main list
usable_floating_ips[pool] = pool_ips
# finally, add ip(s) to instance for each pool
for pool in usable_floating_ips:
for ip in usable_floating_ips[pool]:
try:
server.add_floating_ip(ip)
# We only need to assign one ip - but there is an inherent
# race condition and some other cloud operation may have
# stolen an available floating ip
break
except Exception, e:
module.fail_json(msg = "Error attaching IP %s to instance %s: %s " % (ip, server.id, e.message))
def _add_floating_ip_list(module, server, ips):
# add ip(s) to instance
for ip in ips:
try:
server.add_floating_ip(ip)
except Exception, e:
module.fail_json(msg = "Error attaching IP %s to instance %s: %s " % (ip, server.id, e.message))
def _add_auto_floating_ip(module, nova, server):
try:
new_ip = nova.floating_ips.create()
except Exception as e:
module.fail_json(msg = "Unable to create floating ip: %s" % (e.message))
try:
server.add_floating_ip(new_ip)
except Exception as e:
# Clean up - we auto-created this ip, and it's not attached
# to the server, so the cloud will not know what to do with it
nova.floating_ips.delete(new_ip)
module.fail_json(msg = "Error attaching IP %s to instance %s: %s " % (new_ip.ip, server.id, e.message))
def _add_floating_ip(module, nova, server):
if module.params['floating_ip_pools']:
_add_floating_ip_from_pool(module, nova, server)
elif module.params['floating_ips']:
_add_floating_ip_list(module, server, module.params['floating_ips'])
elif module.params['auto_floating_ip']:
_add_auto_floating_ip(module, nova, server)
else:
return server
# this may look redundant, but if there is now a
# floating IP, then it needs to be obtained from
# a recent server object if the above code path exec'd
try:
server = nova.servers.get(server.id)
except Exception, e:
module.fail_json(msg = "Error in getting info from instance: %s " % e.message)
return server
def _get_image_id(module, nova):
if module.params['image_name']:
for image in nova.images.list():
if (module.params['image_name'] in image.name and (
not module.params['image_exclude']
or module.params['image_exclude'] not in image.name)):
return image.id
module.fail_json(msg = "Error finding image id from name(%s)" % module.params['image_name'])
return module.params['image_id']
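# Illustration (assumed names): with image_name='Ubuntu Server 14.04' and the
# documented default image_exclude='(deprecated)', an image named
# 'Ubuntu Server 14.04 (deprecated)' is skipped while
# 'Ubuntu Server 14.04 LTS' matches.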
def _get_flavor_id(module, nova):
if module.params['flavor_ram']:
for flavor in sorted(nova.flavors.list(), key=operator.attrgetter('ram')):
if (flavor.ram >= module.params['flavor_ram'] and
(not module.params['flavor_include'] or module.params['flavor_include'] in flavor.name)):
return flavor.id
module.fail_json(msg = "Error finding flavor with %sMB of RAM" % module.params['flavor_ram'])
return module.params['flavor_id']
def _create_server(module, nova):
image_id = _get_image_id(module, nova)
flavor_id = _get_flavor_id(module, nova)
bootargs = [module.params['name'], image_id, flavor_id]
bootkwargs = {
'nics' : module.params['nics'],
'meta' : module.params['meta'],
'security_groups': module.params['security_groups'].split(','),
# novaclient expects 'userdata' (no underscore); the parameter is named user_data here for consistency with the ec2 module:
'userdata': module.params['user_data'],
'config_drive': module.params['config_drive'],
}
for optional_param in ('region_name', 'key_name', 'availability_zone'):
if module.params[optional_param]:
bootkwargs[optional_param] = module.params[optional_param]
try:
server = nova.servers.create(*bootargs, **bootkwargs)
server = nova.servers.get(server.id)
except Exception, e:
module.fail_json( msg = "Error in creating instance: %s " % e.message)
if module.params['wait'] == 'yes':
expire = time.time() + int(module.params['wait_for'])
while time.time() < expire:
try:
server = nova.servers.get(server.id)
except Exception, e:
module.fail_json( msg = "Error in getting info from instance: %s" % e.message)
if server.status == 'ACTIVE':
server = _add_floating_ip(module, nova, server)
private = openstack_find_nova_addresses(getattr(server, 'addresses'), 'fixed', 'private')
public = openstack_find_nova_addresses(getattr(server, 'addresses'), 'floating', 'public')
# now exit with info
module.exit_json(changed = True, id = server.id, private_ip=''.join(private), public_ip=''.join(public), status = server.status, info = server._info)
if server.status == 'ERROR':
module.fail_json(msg = "Error in creating the server, please check logs")
time.sleep(2)
module.fail_json(msg = "Timeout waiting for the server to come up.. Please check manually")
if server.status == 'ERROR':
module.fail_json(msg = "Error in creating the server.. Please check manually")
private = openstack_find_nova_addresses(getattr(server, 'addresses'), 'fixed', 'private')
public = openstack_find_nova_addresses(getattr(server, 'addresses'), 'floating', 'public')
module.exit_json(changed = True, id = server.id, private_ip=''.join(private), public_ip=''.join(public), status = server.status, info = server._info)
def _delete_floating_ip_list(module, nova, server, extra_ips):
for ip in extra_ips:
nova.servers.remove_floating_ip(server=server.id, address=ip)
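# Reconcile the instance's floating IPs with what the task requested: attach
# an IP if one is expected but missing; when an explicit floating_ips list was
# given, attach missing entries and detach extras.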
def _check_floating_ips(module, nova, server):
changed = False
if module.params['floating_ip_pools'] or module.params['floating_ips'] or module.params['auto_floating_ip']:
ips = openstack_find_nova_addresses(server.addresses, 'floating')
if not ips:
# If we're configured to have a floating but we don't have one,
# let's add one
server = _add_floating_ip(module, nova, server)
changed = True
elif module.params['floating_ips']:
# we were configured to have specific ips, let's make sure we have
# those
missing_ips = []
for ip in module.params['floating_ips']:
if ip not in ips:
missing_ips.append(ip)
if missing_ips:
_add_floating_ip_list(module, server, missing_ips)
changed = True
extra_ips = []
for ip in ips:
if ip not in module.params['floating_ips']:
extra_ips.append(ip)
if extra_ips:
_delete_floating_ip_list(module, nova, server, extra_ips)
changed = True
return (changed, server)
def _get_server_state(module, nova):
server = None
try:
servers = nova.servers.list(True, {'name': module.params['name']})
if servers:
# the {'name': module.params['name']} will also return servers
# with names that partially match the server name, so we have to
# strictly filter here
servers = [x for x in servers if x.name == module.params['name']]
if servers:
server = servers[0]
except Exception, e:
module.fail_json(msg = "Error in getting the server list: %s" % e.message)
if server and module.params['state'] == 'present':
if server.status != 'ACTIVE':
module.fail_json( msg="The VM is available but not Active. state:" + server.status)
(ip_changed, server) = _check_floating_ips(module, nova, server)
private = openstack_find_nova_addresses(getattr(server, 'addresses'), 'fixed', 'private')
public = openstack_find_nova_addresses(getattr(server, 'addresses'), 'floating', 'public')
module.exit_json(changed = ip_changed, id = server.id, public_ip = ''.join(public), private_ip = ''.join(private), info = server._info)
if server and module.params['state'] == 'absent':
return True
if module.params['state'] == 'absent':
module.exit_json(changed = False, result = "not present")
return True
def main():
argument_spec = openstack_argument_spec()
argument_spec.update(dict(
name = dict(required=True),
image_id = dict(default=None),
image_name = dict(default=None),
image_exclude = dict(default='(deprecated)'),
flavor_id = dict(default=1),
flavor_ram = dict(default=None, type='int'),
flavor_include = dict(default=None),
key_name = dict(default=None),
security_groups = dict(default='default'),
nics = dict(default=None),
meta = dict(default=None),
wait = dict(default='yes', choices=['yes', 'no']),
wait_for = dict(default=180),
state = dict(default='present', choices=['absent', 'present']),
user_data = dict(default=None),
config_drive = dict(default=False, type='bool'),
auto_floating_ip = dict(default=False, type='bool'),
floating_ips = dict(default=None),
floating_ip_pools = dict(default=None),
))
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive=[
['auto_floating_ip','floating_ips'],
['auto_floating_ip','floating_ip_pools'],
['floating_ips','floating_ip_pools'],
['image_id','image_name'],
['flavor_id','flavor_ram'],
],
)
nova = nova_client.Client(module.params['login_username'],
module.params['login_password'],
module.params['login_tenant_name'],
module.params['auth_url'],
region_name=module.params['region_name'],
service_type='compute')
try:
nova.authenticate()
except exceptions.Unauthorized, e:
module.fail_json(msg = "Invalid OpenStack Nova credentials.: %s" % e.message)
except exceptions.AuthorizationFailure, e:
module.fail_json(msg = "Unable to authorize user: %s" % e.message)
if module.params['state'] == 'present':
if not module.params['image_id'] and not module.params['image_name']:
module.fail_json( msg = "Parameter 'image_id' or `image_name` is required if state == 'present'")
else:
_get_server_state(module, nova)
_create_server(module, nova)
if module.params['state'] == 'absent':
_get_server_state(module, nova)
_delete_server(module, nova)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
main()

@ -1,139 +0,0 @@
#!/usr/bin/python
#coding: utf-8 -*-
# (c) 2013, Benno Joy <benno@ansible.com>
# (c) 2013, John Dewey <john@dewey.ws>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
from novaclient.v1_1 import client as nova_client
from novaclient import exceptions as exc
import time
except ImportError:
print("failed=True msg='novaclient is required for this module to work'")
DOCUMENTATION = '''
---
module: nova_keypair
version_added: "1.2"
short_description: Add/Delete key pair from nova
description:
- Add or remove a key pair from nova.
options:
login_username:
description:
- login username to authenticate to keystone
required: true
default: admin
login_password:
description:
- Password of login user
required: true
default: 'yes'
login_tenant_name:
description:
- The tenant name of the login user
required: true
default: 'yes'
auth_url:
description:
- The keystone url for authentication
required: false
default: 'http://127.0.0.1:35357/v2.0/'
region_name:
description:
- Name of the region
required: false
default: None
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
name:
description:
- Name that has to be given to the key pair
required: true
default: None
public_key:
description:
- The public key that would be uploaded to nova and injected to vm's upon creation
required: false
default: None
requirements: ["novaclient"]
'''
EXAMPLES = '''
# Creates a key pair with the running user's public key
- nova_keypair: state=present login_username=admin
login_password=admin login_tenant_name=admin name=ansible_key
public_key={{ lookup('file','~/.ssh/id_rsa.pub') }}
# Creates a new key pair; the private key is returned after the run.
- nova_keypair: state=present login_username=admin login_password=admin
login_tenant_name=admin name=ansible_key
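# A hypothetical extra example (not part of the original docs): remove the key pair again
- nova_keypair: state=absent login_username=admin login_password=admin
login_tenant_name=admin name=ansible_key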
'''
def main():
argument_spec = openstack_argument_spec()
argument_spec.update(dict(
name = dict(required=True),
public_key = dict(default=None),
state = dict(default='present', choices=['absent', 'present'])
))
module = AnsibleModule(argument_spec=argument_spec)
nova = nova_client.Client(module.params['login_username'],
module.params['login_password'],
module.params['login_tenant_name'],
module.params['auth_url'],
region_name=module.params['region_name'],
service_type='compute')
try:
nova.authenticate()
except exc.Unauthorized, e:
module.fail_json(msg = "Invalid OpenStack Nova credentials.: %s" % e.message)
except exc.AuthorizationFailure, e:
module.fail_json(msg = "Unable to authorize user: %s" % e.message)
if module.params['state'] == 'present':
for key in nova.keypairs.list():
if key.name == module.params['name']:
if module.params['public_key'] and (module.params['public_key'] != key.public_key ):
module.fail_json(msg = "name {} present but key hash not the same as offered. Delete key first.".format(key['name']))
else:
module.exit_json(changed = False, result = "Key present")
try:
key = nova.keypairs.create(module.params['name'], module.params['public_key'])
except Exception, e:
module.exit_json(msg = "Error in creating the keypair: %s" % e.message)
if not module.params['public_key']:
module.exit_json(changed = True, key = key.private_key)
module.exit_json(changed = True, key = None)
if module.params['state'] == 'absent':
for key in nova.keypairs.list():
if key.name == module.params['name']:
try:
nova.keypairs.delete(module.params['name'])
except Exception, e:
module.fail_json(msg = "The keypair deletion has failed: %s" % e.message)
module.exit_json( changed = True, result = "deleted")
module.exit_json(changed = False, result = "not present")
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
main()

@ -1,266 +0,0 @@
#!/usr/bin/python
#coding: utf-8 -*-
# (c) 2013, Benno Joy <benno@ansible.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
from novaclient.v1_1 import client as nova_client
try:
from neutronclient.neutron import client
except ImportError:
from quantumclient.quantum import client
from keystoneclient.v2_0 import client as ksclient
import time
except ImportError:
print("failed=True msg='novaclient,keystoneclient and quantumclient (or neutronclient) are required'")
DOCUMENTATION = '''
---
module: quantum_floating_ip
version_added: "1.2"
short_description: Add/Remove floating IP from an instance
description:
- Add or Remove a floating IP to an instance
options:
login_username:
description:
- login username to authenticate to keystone
required: true
default: admin
login_password:
description:
- Password of login user
required: true
default: 'yes'
login_tenant_name:
description:
- The tenant name of the login user
required: true
default: 'yes'
auth_url:
description:
- The keystone url for authentication
required: false
default: 'http://127.0.0.1:35357/v2.0/'
region_name:
description:
- Name of the region
required: false
default: None
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
network_name:
description:
- Name of the network from which the IP has to be assigned to the VM. Please make sure the network is an external network
required: true
default: None
instance_name:
description:
- The name of the instance to which the IP address should be assigned
required: true
default: None
internal_network_name:
description:
- The name of the network of the port to associate with the floating ip. Necessary when the VM has multiple networks.
required: false
default: None
version_added: "1.5"
requirements: ["novaclient", "quantumclient", "neutronclient", "keystoneclient"]
'''
EXAMPLES = '''
# Assign a floating ip to the instance from an external network
- quantum_floating_ip: state=present login_username=admin login_password=admin
login_tenant_name=admin network_name=external_network
instance_name=vm1 internal_network_name=internal_network
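# A hypothetical extra example (not part of the original docs): release the floating IP again
- quantum_floating_ip: state=absent login_username=admin login_password=admin
login_tenant_name=admin network_name=external_network
instance_name=vm1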
'''
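# The helpers below build a neutron client the long way round: authenticate
# against keystone, look up the 'network' endpoint from the service catalog,
# then hand the token and endpoint URL to the neutron (or quantum) client.
# The same pattern repeats in the other quantum_* modules in this commit.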
def _get_ksclient(module, kwargs):
try:
kclient = ksclient.Client(username=kwargs.get('login_username'),
password=kwargs.get('login_password'),
tenant_name=kwargs.get('login_tenant_name'),
auth_url=kwargs.get('auth_url'))
except Exception, e:
module.fail_json(msg = "Error authenticating to the keystone: %s " % e.message)
global _os_keystone
_os_keystone = kclient
return kclient
def _get_endpoint(module, ksclient):
try:
endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL')
except Exception, e:
module.fail_json(msg = "Error getting network endpoint: %s" % e.message)
return endpoint
def _get_neutron_client(module, kwargs):
_ksclient = _get_ksclient(module, kwargs)
token = _ksclient.auth_token
endpoint = _get_endpoint(module, _ksclient)
kwargs = {
'token': token,
'endpoint_url': endpoint
}
try:
neutron = client.Client('2.0', **kwargs)
except Exception, e:
module.fail_json(msg = "Error in connecting to neutron: %s " % e.message)
return neutron
def _get_server_state(module, nova):
server_info = None
server = None
try:
for server in nova.servers.list():
if server:
info = server._info
if info['name'] == module.params['instance_name']:
if info['status'] != 'ACTIVE' and module.params['state'] == 'present':
module.fail_json( msg="The VM is available but not Active. state:" + info['status'])
server_info = info
break
except Exception, e:
module.fail_json(msg = "Error in getting the server list: %s" % e.message)
return server_info, server
def _get_port_info(neutron, module, instance_id, internal_network_name=None):
subnet_id = None
if internal_network_name:
kwargs = {'name': internal_network_name}
networks = neutron.list_networks(**kwargs)
network_id = networks['networks'][0]['id']
kwargs = {
'network_id': network_id,
'ip_version': 4
}
subnets = neutron.list_subnets(**kwargs)
subnet_id = subnets['subnets'][0]['id']
kwargs = {
'device_id': instance_id,
}
try:
ports = neutron.list_ports(**kwargs)
except Exception, e:
module.fail_json( msg = "Error in listing ports: %s" % e.message)
if not ports['ports']:
return None, None
if subnet_id:
port = next(port for port in ports['ports'] if port['fixed_ips'][0]['subnet_id'] == subnet_id)
port_id = port['id']
fixed_ip_address = port['fixed_ips'][0]['ip_address']
else:
port_id = ports['ports'][0]['id']
fixed_ip_address = ports['ports'][0]['fixed_ips'][0]['ip_address']
return fixed_ip_address, port_id
def _get_floating_ip(module, neutron, fixed_ip_address):
kwargs = {
'fixed_ip_address': fixed_ip_address
}
try:
ips = neutron.list_floatingips(**kwargs)
except Exception, e:
module.fail_json(msg = "error in fetching the floatingips's %s" % e.message)
if not ips['floatingips']:
return None, None
return ips['floatingips'][0]['id'], ips['floatingips'][0]['floating_ip_address']
def _create_floating_ip(neutron, module, port_id, net_id, fixed_ip):
kwargs = {
'port_id': port_id,
'floating_network_id': net_id,
'fixed_ip_address': fixed_ip
}
try:
result = neutron.create_floatingip({'floatingip': kwargs})
except Exception, e:
module.fail_json(msg="There was an error in updating the floating ip address: %s" % e.message)
module.exit_json(changed=True, result=result, public_ip=result['floatingip']['floating_ip_address'])
def _get_net_id(neutron, module):
kwargs = {
'name': module.params['network_name'],
}
try:
networks = neutron.list_networks(**kwargs)
except Exception, e:
module.fail_json("Error in listing neutron networks: %s" % e.message)
if not networks['networks']:
return None
return networks['networks'][0]['id']
def _update_floating_ip(neutron, module, port_id, floating_ip_id):
kwargs = {
'port_id': port_id
}
try:
result = neutron.update_floatingip(floating_ip_id, {'floatingip': kwargs})
except Exception, e:
module.fail_json(msg="There was an error in updating the floating ip address: %s" % e.message)
module.exit_json(changed=True, result=result)
def main():
argument_spec = openstack_argument_spec()
argument_spec.update(dict(
network_name = dict(required=True),
instance_name = dict(required=True),
state = dict(default='present', choices=['absent', 'present']),
internal_network_name = dict(default=None),
))
module = AnsibleModule(argument_spec=argument_spec)
try:
nova = nova_client.Client(module.params['login_username'], module.params['login_password'],
module.params['login_tenant_name'], module.params['auth_url'], service_type='compute')
neutron = _get_neutron_client(module, module.params)
except Exception, e:
module.fail_json(msg="Error in authenticating to nova: %s" % e.message)
server_info, server_obj = _get_server_state(module, nova)
if not server_info:
module.fail_json(msg="The instance name provided cannot be found")
fixed_ip, port_id = _get_port_info(neutron, module, server_info['id'], module.params['internal_network_name'])
if not port_id:
module.fail_json(msg="Cannot find a port for this instance, maybe fixed ip is not assigned")
floating_id, floating_ip = _get_floating_ip(module, neutron, fixed_ip)
if module.params['state'] == 'present':
if floating_ip:
module.exit_json(changed = False, public_ip=floating_ip)
net_id = _get_net_id(neutron, module)
if not net_id:
module.fail_json(msg = "cannot find the network specified, please check")
_create_floating_ip(neutron, module, port_id, net_id, fixed_ip)
if module.params['state'] == 'absent':
if floating_ip:
_update_floating_ip(neutron, module, None, floating_id)
module.exit_json(changed=False)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
main()

@ -1,218 +0,0 @@
#!/usr/bin/python
#coding: utf-8 -*-
# (c) 2013, Benno Joy <benno@ansible.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
from novaclient.v1_1 import client as nova_client
try:
from neutronclient.neutron import client
except ImportError:
from quantumclient.quantum import client
from keystoneclient.v2_0 import client as ksclient
import time
except ImportError:
print "failed=True msg='novaclient, keystone, and quantumclient (or neutronclient) client are required'"
DOCUMENTATION = '''
---
module: quantum_floating_ip_associate
version_added: "1.2"
short_description: Associate or disassociate a particular floating IP with an instance
description:
- Associates or disassociates a specific floating IP with a particular instance
options:
login_username:
description:
- login username to authenticate to keystone
required: true
default: admin
login_password:
description:
- password of login user
required: true
default: 'yes'
login_tenant_name:
description:
- the tenant name of the login user
required: true
default: true
auth_url:
description:
- the keystone url for authentication
required: false
default: 'http://127.0.0.1:35357/v2.0/'
region_name:
description:
- name of the region
required: false
default: None
state:
description:
- indicates the desired state of the resource
choices: ['present', 'absent']
default: present
instance_name:
description:
- name of the instance to which the public IP should be assigned
required: true
default: None
ip_address:
description:
- floating ip that should be assigned to the instance
required: true
default: None
requirements: ["quantumclient", "neutronclient", "keystoneclient"]
'''
EXAMPLES = '''
# Associate a specific floating IP with an Instance
- quantum_floating_ip_associate:
state=present
login_username=admin
login_password=admin
login_tenant_name=admin
ip_address=1.1.1.1
instance_name=vm1
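# A hypothetical extra example (not part of the original docs): detach the same IP
- quantum_floating_ip_associate:
state=absent
login_username=admin
login_password=admin
login_tenant_name=admin
ip_address=1.1.1.1
instance_name=vm1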
'''
def _get_ksclient(module, kwargs):
try:
kclient = ksclient.Client(username=kwargs.get('login_username'),
password=kwargs.get('login_password'),
tenant_name=kwargs.get('login_tenant_name'),
auth_url=kwargs.get('auth_url'))
except Exception, e:
module.fail_json(msg = "Error authenticating to the keystone: %s " % e.message)
global _os_keystone
_os_keystone = kclient
return kclient
def _get_endpoint(module, ksclient):
try:
endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL')
except Exception, e:
module.fail_json(msg = "Error getting network endpoint: %s" % e.message)
return endpoint
def _get_neutron_client(module, kwargs):
_ksclient = _get_ksclient(module, kwargs)
token = _ksclient.auth_token
endpoint = _get_endpoint(module, _ksclient)
kwargs = {
'token': token,
'endpoint_url': endpoint
}
try:
neutron = client.Client('2.0', **kwargs)
except Exception, e:
module.fail_json(msg = "Error in connecting to neutron: %s " % e.message)
return neutron
def _get_server_state(module, nova):
server_info = None
server = None
try:
for server in nova.servers.list():
if server:
info = server._info
if info['name'] == module.params['instance_name']:
if info['status'] != 'ACTIVE' and module.params['state'] == 'present':
module.fail_json(msg="The VM is available but not Active. state:" + info['status'])
server_info = info
break
except Exception, e:
module.fail_json(msg = "Error in getting the server list: %s" % e.message)
return server_info, server
def _get_port_id(neutron, module, instance_id):
kwargs = dict(device_id = instance_id)
try:
ports = neutron.list_ports(**kwargs)
except Exception, e:
module.fail_json( msg = "Error in listing ports: %s" % e.message)
if not ports['ports']:
return None
return ports['ports'][0]['id']
def _get_floating_ip_id(module, neutron):
kwargs = {
'floating_ip_address': module.params['ip_address']
}
try:
ips = neutron.list_floatingips(**kwargs)
except Exception, e:
module.fail_json(msg = "error in fetching the floatingips's %s" % e.message)
if not ips['floatingips']:
module.fail_json(msg = "Could find the ip specified in parameter, Please check")
ip = ips['floatingips'][0]['id']
if not ips['floatingips'][0]['port_id']:
state = "detached"
else:
state = "attached"
return state, ip
def _update_floating_ip(neutron, module, port_id, floating_ip_id):
kwargs = {
'port_id': port_id
}
try:
result = neutron.update_floatingip(floating_ip_id, {'floatingip': kwargs})
except Exception, e:
module.fail_json(msg = "There was an error in updating the floating ip address: %s" % e.message)
module.exit_json(changed = True, result = result, public_ip=module.params['ip_address'])
def main():
argument_spec = openstack_argument_spec()
argument_spec.update(dict(
ip_address = dict(required=True),
instance_name = dict(required=True),
state = dict(default='present', choices=['absent', 'present'])
))
module = AnsibleModule(argument_spec=argument_spec)
try:
nova = nova_client.Client(module.params['login_username'], module.params['login_password'],
module.params['login_tenant_name'], module.params['auth_url'], service_type='compute')
except Exception, e:
module.fail_json( msg = " Error in authenticating to nova: %s" % e.message)
neutron = _get_neutron_client(module, module.params)
state, floating_ip_id = _get_floating_ip_id(module, neutron)
if module.params['state'] == 'present':
if state == 'attached':
module.exit_json(changed = False, result = 'attached', public_ip=module.params['ip_address'])
server_info, server_obj = _get_server_state(module, nova)
if not server_info:
module.fail_json(msg = " The instance name provided cannot be found")
port_id = _get_port_id(neutron, module, server_info['id'])
if not port_id:
module.fail_json(msg = "Cannot find a port for this instance, maybe fixed ip is not assigned")
_update_floating_ip(neutron, module, port_id, floating_ip_id)
if module.params['state'] == 'absent':
if state == 'detached':
module.exit_json(changed = False, result = 'detached')
if state == 'attached':
_update_floating_ip(neutron, module, None, floating_ip_id)
module.exit_json(changed = True, result = "detached")
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
main()

@ -1,279 +0,0 @@
#!/usr/bin/python
#coding: utf-8 -*-
# (c) 2013, Benno Joy <benno@ansible.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
try:
from neutronclient.neutron import client
except ImportError:
from quantumclient.quantum import client
from keystoneclient.v2_0 import client as ksclient
except ImportError:
print("failed=True msg='quantumclient (or neutronclient) and keystone client are required'")
DOCUMENTATION = '''
---
module: quantum_network
version_added: "1.4"
short_description: Creates/Removes networks from OpenStack
description:
- Add or Remove network from OpenStack.
options:
login_username:
description:
- login username to authenticate to keystone
required: true
default: admin
login_password:
description:
- Password of login user
required: true
default: 'yes'
login_tenant_name:
description:
- The tenant name of the login user
required: true
default: 'yes'
tenant_name:
description:
- The name of the tenant for whom the network is created
required: false
default: None
auth_url:
description:
- The keystone url for authentication
required: false
default: 'http://127.0.0.1:35357/v2.0/'
region_name:
description:
- Name of the region
required: false
default: None
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
name:
description:
- Name to be assigned to the network
required: true
default: None
provider_network_type:
description:
- The type of the network to be created, gre, vlan, local. Available types depend on the plugin. The Quantum service decides if not specified.
required: false
default: None
provider_physical_network:
description:
- The physical network which would realize the virtual network for flat and vlan networks.
required: false
default: None
provider_segmentation_id:
description:
- The id that has to be assigned to the network, in case of vlan networks that would be vlan id and for gre the tunnel id
required: false
default: None
router_external:
description:
- If 'yes', specifies that the virtual network is an external network (public).
required: false
default: false
shared:
description:
- Whether this network is shared or not
required: false
default: false
admin_state_up:
description:
- Whether the state should be marked as up or down
required: false
default: true
requirements: ["quantumclient", "neutronclient", "keystoneclient"]
'''
EXAMPLES = '''
# Create a GRE backed Quantum network with tunnel id 1 for tenant1
- quantum_network: name=t1network tenant_name=tenant1 state=present
provider_network_type=gre provider_segmentation_id=1
login_username=admin login_password=admin login_tenant_name=admin
# Create an external network
- quantum_network: name=external_network state=present
provider_network_type=local router_external=yes
login_username=admin login_password=admin login_tenant_name=admin
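# A hypothetical extra example (not part of the original docs): delete the external network again
- quantum_network: name=external_network state=absent
login_username=admin login_password=admin login_tenant_name=admin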
'''
_os_keystone = None
_os_tenant_id = None
def _get_ksclient(module, kwargs):
try:
kclient = ksclient.Client(username=kwargs.get('login_username'),
password=kwargs.get('login_password'),
tenant_name=kwargs.get('login_tenant_name'),
auth_url=kwargs.get('auth_url'))
except Exception, e:
module.fail_json(msg = "Error authenticating to the keystone: %s" %e.message)
global _os_keystone
_os_keystone = kclient
return kclient
def _get_endpoint(module, ksclient):
try:
endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL')
except Exception, e:
module.fail_json(msg = "Error getting network endpoint: %s " %e.message)
return endpoint
def _get_neutron_client(module, kwargs):
_ksclient = _get_ksclient(module, kwargs)
token = _ksclient.auth_token
endpoint = _get_endpoint(module, _ksclient)
kwargs = {
'token': token,
'endpoint_url': endpoint
}
try:
neutron = client.Client('2.0', **kwargs)
except Exception, e:
module.fail_json(msg = " Error in connecting to neutron: %s " %e.message)
return neutron
def _set_tenant_id(module):
global _os_tenant_id
if not module.params['tenant_name']:
tenant_name = module.params['login_tenant_name']
else:
tenant_name = module.params['tenant_name']
for tenant in _os_keystone.tenants.list():
if tenant.name == tenant_name:
_os_tenant_id = tenant.id
break
if not _os_tenant_id:
module.fail_json(msg = "The tenant id cannot be found, please check the parameters")
def _get_net_id(neutron, module):
kwargs = {
'tenant_id': _os_tenant_id,
'name': module.params['name'],
}
try:
networks = neutron.list_networks(**kwargs)
except Exception, e:
module.fail_json(msg = "Error in listing neutron networks: %s" % e.message)
if not networks['networks']:
return None
return networks['networks'][0]['id']
def _create_network(module, neutron):
neutron.format = 'json'
network = {
'name': module.params.get('name'),
'tenant_id': _os_tenant_id,
'provider:network_type': module.params.get('provider_network_type'),
'provider:physical_network': module.params.get('provider_physical_network'),
'provider:segmentation_id': module.params.get('provider_segmentation_id'),
'router:external': module.params.get('router_external'),
'shared': module.params.get('shared'),
'admin_state_up': module.params.get('admin_state_up'),
}
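# Strip the provider attributes that do not apply to the chosen network type
# before the create call, as the plugin may reject irrelevant provider keys.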
if module.params['provider_network_type'] == 'local':
network.pop('provider:physical_network', None)
network.pop('provider:segmentation_id', None)
if module.params['provider_network_type'] == 'flat':
network.pop('provider:segmentation_id', None)
if module.params['provider_network_type'] == 'gre':
network.pop('provider:physical_network', None)
if module.params['provider_network_type'] is None:
network.pop('provider:network_type', None)
network.pop('provider:physical_network', None)
network.pop('provider:segmentation_id', None)
try:
net = neutron.create_network({'network':network})
except Exception, e:
module.fail_json(msg = "Error in creating network: %s" % e.message)
return net['network']['id']
def _delete_network(module, net_id, neutron):
try:
id = neutron.delete_network(net_id)
except Exception, e:
module.fail_json(msg = "Error in deleting the network: %s" % e.message)
return True
def main():
argument_spec = openstack_argument_spec()
argument_spec.update(dict(
name = dict(required=True),
tenant_name = dict(default=None),
provider_network_type = dict(default=None, choices=['local', 'vlan', 'flat', 'gre']),
provider_physical_network = dict(default=None),
provider_segmentation_id = dict(default=None),
router_external = dict(default=False, type='bool'),
shared = dict(default=False, type='bool'),
admin_state_up = dict(default=True, type='bool'),
state = dict(default='present', choices=['absent', 'present'])
))
module = AnsibleModule(argument_spec=argument_spec)
if module.params['provider_network_type'] in ['vlan' , 'flat']:
if not module.params['provider_physical_network']:
module.fail_json(msg = " for vlan and flat networks, variable provider_physical_network should be set.")
if module.params['provider_network_type'] in ['vlan', 'gre']:
if not module.params['provider_segmentation_id']:
module.fail_json(msg = " for vlan & gre networks, variable provider_segmentation_id should be set.")
neutron = _get_neutron_client(module, module.params)
_set_tenant_id(module)
if module.params['state'] == 'present':
network_id = _get_net_id(neutron, module)
if not network_id:
network_id = _create_network(module, neutron)
module.exit_json(changed = True, result = "Created", id = network_id)
else:
module.exit_json(changed = False, result = "Success", id = network_id)
if module.params['state'] == 'absent':
network_id = _get_net_id(neutron, module)
if not network_id:
module.exit_json(changed = False, result = "Success")
else:
_delete_network(module, network_id, neutron)
module.exit_json(changed = True, result = "Deleted")
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
main()

@ -1,210 +0,0 @@
#!/usr/bin/python
#coding: utf-8 -*-
# (c) 2013, Benno Joy <benno@ansible.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
try:
from neutronclient.neutron import client
except ImportError:
from quantumclient.quantum import client
from keystoneclient.v2_0 import client as ksclient
except ImportError:
print("failed=True msg='quantumclient (or neutronclient) and keystone client are required'")
DOCUMENTATION = '''
---
module: quantum_router
version_added: "1.2"
short_description: Create or Remove router from openstack
description:
- Create or Delete routers from OpenStack
options:
login_username:
description:
- login username to authenticate to keystone
required: true
default: admin
login_password:
description:
- Password of login user
required: true
default: 'yes'
login_tenant_name:
description:
- The tenant name of the login user
required: true
default: 'yes'
auth_url:
description:
- The keystone url for authentication
required: false
default: 'http://127.0.0.1:35357/v2.0/'
region_name:
description:
- Name of the region
required: false
default: None
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
name:
description:
- Name to be given to the router
required: true
default: None
tenant_name:
description:
- Name of the tenant for which the router has to be created; if none, the router will be created for the login tenant.
required: false
default: None
admin_state_up:
description:
- Desired admin state of the created router.
required: false
default: true
requirements: ["quantumclient", "neutronclient", "keystoneclient"]
'''
EXAMPLES = '''
# Creates a router for tenant admin
- quantum_router: state=present
login_username=admin
login_password=admin
login_tenant_name=admin
name=router1"
'''
_os_keystone = None
_os_tenant_id = None
def _get_ksclient(module, kwargs):
try:
kclient = ksclient.Client(username=kwargs.get('login_username'),
password=kwargs.get('login_password'),
tenant_name=kwargs.get('login_tenant_name'),
auth_url=kwargs.get('auth_url'))
except Exception, e:
module.fail_json(msg = "Error authenticating to the keystone: %s " % e.message)
global _os_keystone
_os_keystone = kclient
return kclient
def _get_endpoint(module, ksclient):
try:
endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL')
except Exception, e:
module.fail_json(msg = "Error getting network endpoint: %s" % e.message)
return endpoint
def _get_neutron_client(module, kwargs):
_ksclient = _get_ksclient(module, kwargs)
token = _ksclient.auth_token
endpoint = _get_endpoint(module, _ksclient)
kwargs = {
'token': token,
'endpoint_url': endpoint
}
try:
neutron = client.Client('2.0', **kwargs)
except Exception, e:
module.fail_json(msg = "Error in connecting to neutron: %s " % e.message)
return neutron
def _set_tenant_id(module):
global _os_tenant_id
if not module.params['tenant_name']:
login_tenant_name = module.params['login_tenant_name']
else:
login_tenant_name = module.params['tenant_name']
for tenant in _os_keystone.tenants.list():
if tenant.name == login_tenant_name:
_os_tenant_id = tenant.id
break
if not _os_tenant_id:
module.fail_json(msg = "The tenant id cannot be found, please check the parameters")
def _get_router_id(module, neutron):
kwargs = {
'name': module.params['name'],
'tenant_id': _os_tenant_id,
}
try:
routers = neutron.list_routers(**kwargs)
except Exception, e:
module.fail_json(msg = "Error in getting the router list: %s " % e.message)
if not routers['routers']:
return None
return routers['routers'][0]['id']
def _create_router(module, neutron):
router = {
'name': module.params['name'],
'tenant_id': _os_tenant_id,
'admin_state_up': module.params['admin_state_up'],
}
try:
new_router = neutron.create_router(dict(router=router))
except Exception, e:
module.fail_json( msg = "Error in creating router: %s" % e.message)
return new_router['router']['id']
def _delete_router(module, neutron, router_id):
try:
neutron.delete_router(router_id)
except:
module.fail_json("Error in deleting the router")
return True
def main():
argument_spec = openstack_argument_spec()
argument_spec.update(dict(
name = dict(required=True),
tenant_name = dict(default=None),
state = dict(default='present', choices=['absent', 'present']),
admin_state_up = dict(type='bool', default=True),
))
module = AnsibleModule(argument_spec=argument_spec)
neutron = _get_neutron_client(module, module.params)
_set_tenant_id(module)
if module.params['state'] == 'present':
router_id = _get_router_id(module, neutron)
if not router_id:
router_id = _create_router(module, neutron)
module.exit_json(changed=True, result="Created", id=router_id)
else:
module.exit_json(changed=False, result="success" , id=router_id)
else:
router_id = _get_router_id(module, neutron)
if not router_id:
module.exit_json(changed=False, result="success")
else:
_delete_router(module, neutron, router_id)
module.exit_json(changed=True, result="deleted")
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
main()

@ -1,213 +0,0 @@
#!/usr/bin/python
#coding: utf-8 -*-
# (c) 2013, Benno Joy <benno@ansible.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
try:
from neutronclient.neutron import client
except ImportError:
from quantumclient.quantum import client
from keystoneclient.v2_0 import client as ksclient
except ImportError:
print("failed=True msg='quantumclient (or neutronclient) and keystone client are required'")
DOCUMENTATION = '''
---
module: quantum_router_gateway
version_added: "1.2"
short_description: set/unset a gateway interface for the router with the specified external network
description:
- Creates/Removes a gateway interface from the router, used to associate an external network with a router to route external traffic.
options:
login_username:
description:
- login username to authenticate to keystone
required: true
default: admin
login_password:
description:
- Password of login user
required: true
default: 'yes'
login_tenant_name:
description:
- The tenant name of the login user
required: true
default: 'yes'
auth_url:
description:
- The keystone URL for authentication
required: false
default: 'http://127.0.0.1:35357/v2.0/'
region_name:
description:
- Name of the region
required: false
default: None
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
router_name:
description:
- Name of the router to which the gateway should be attached.
required: true
default: None
network_name:
description:
- Name of the external network which should be attached to the router.
required: true
default: None
requirements: ["quantumclient", "neutronclient", "keystoneclient"]
'''
EXAMPLES = '''
# Attach an external network with a router to allow flow of external traffic
- quantum_router_gateway: state=present login_username=admin login_password=admin
login_tenant_name=admin router_name=external_router
network_name=external_network
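# A hypothetical extra example (not part of the original docs): detach the gateway again
- quantum_router_gateway: state=absent login_username=admin login_password=admin
login_tenant_name=admin router_name=external_router
network_name=external_network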
'''
_os_keystone = None
def _get_ksclient(module, kwargs):
try:
kclient = ksclient.Client(username=kwargs.get('login_username'),
password=kwargs.get('login_password'),
tenant_name=kwargs.get('login_tenant_name'),
auth_url=kwargs.get('auth_url'))
except Exception, e:
module.fail_json(msg = "Error authenticating to the keystone: %s " % e.message)
global _os_keystone
_os_keystone = kclient
return kclient
def _get_endpoint(module, ksclient):
try:
endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL')
except Exception, e:
module.fail_json(msg = "Error getting network endpoint: %s" % e.message)
return endpoint
def _get_neutron_client(module, kwargs):
_ksclient = _get_ksclient(module, kwargs)
token = _ksclient.auth_token
endpoint = _get_endpoint(module, _ksclient)
kwargs = {
'token': token,
'endpoint_url': endpoint
}
try:
neutron = client.Client('2.0', **kwargs)
except Exception, e:
module.fail_json(msg = "Error in connecting to neutron: %s " % e.message)
return neutron
def _get_router_id(module, neutron):
kwargs = {
'name': module.params['router_name'],
}
try:
routers = neutron.list_routers(**kwargs)
except Exception, e:
module.fail_json(msg = "Error in getting the router list: %s " % e.message)
if not routers['routers']:
return None
return routers['routers'][0]['id']
def _get_net_id(neutron, module):
kwargs = {
'name': module.params['network_name'],
'router:external': True
}
try:
networks = neutron.list_networks(**kwargs)
except Exception, e:
module.fail_json("Error in listing neutron networks: %s" % e.message)
if not networks['networks']:
return None
return networks['networks'][0]['id']
def _get_port_id(neutron, module, router_id, network_id):
kwargs = {
'device_id': router_id,
'network_id': network_id,
}
try:
ports = neutron.list_ports(**kwargs)
except Exception, e:
module.fail_json( msg = "Error in listing ports: %s" % e.message)
if not ports['ports']:
return None
return ports['ports'][0]['id']
def _add_gateway_router(neutron, module, router_id, network_id):
kwargs = {
'network_id': network_id
}
try:
neutron.add_gateway_router(router_id, kwargs)
except Exception, e:
module.fail_json(msg = "Error in adding gateway to router: %s" % e.message)
return True
def _remove_gateway_router(neutron, module, router_id):
try:
neutron.remove_gateway_router(router_id)
except Exception, e:
module.fail_json(msg = "Error in removing gateway to router: %s" % e.message)
return True
def main():
argument_spec = openstack_argument_spec()
argument_spec.update(dict(
router_name = dict(required=True),
network_name = dict(required=True),
state = dict(default='present', choices=['absent', 'present']),
))
module = AnsibleModule(argument_spec=argument_spec)
neutron = _get_neutron_client(module, module.params)
router_id = _get_router_id(module, neutron)
if not router_id:
module.fail_json(msg="failed to get the router id, please check the router name")
network_id = _get_net_id(neutron, module)
if not network_id:
module.fail_json(msg="failed to get the network id, please check the network name and make sure it is external")
if module.params['state'] == 'present':
port_id = _get_port_id(neutron, module, router_id, network_id)
if not port_id:
_add_gateway_router(neutron, module, router_id, network_id)
module.exit_json(changed=True, result="created")
module.exit_json(changed=False, result="success")
if module.params['state'] == 'absent':
port_id = _get_port_id(neutron, module, router_id, network_id)
if not port_id:
module.exit_json(changed=False, result="Success")
_remove_gateway_router(neutron, module, router_id)
module.exit_json(changed=True, result="Deleted")
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
main()

@ -1,249 +0,0 @@
#!/usr/bin/python
#coding: utf-8 -*-
# (c) 2013, Benno Joy <benno@ansible.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
try:
from neutronclient.neutron import client
except ImportError:
from quantumclient.quantum import client
from keystoneclient.v2_0 import client as ksclient
except ImportError:
print("failed=True msg='quantumclient (or neutronclient) and keystone client are required'")
DOCUMENTATION = '''
---
module: quantum_router_interface
version_added: "1.2"
short_description: Attach/Detach a subnet's interface to a router
description:
- Attach/Detach a subnet interface to a router, to provide a gateway for the subnet.
options:
login_username:
description:
- login username to authenticate to keystone
required: true
default: admin
login_password:
description:
- Password of login user
required: true
default: 'yes'
login_tenant_name:
description:
- The tenant name of the login user
required: true
default: 'yes'
auth_url:
description:
- The keystone URL for authentication
required: false
default: 'http://127.0.0.1:35357/v2.0/'
region_name:
description:
- Name of the region
required: false
default: None
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
router_name:
description:
- Name of the router to which the subnet's interface should be attached.
required: true
default: None
subnet_name:
description:
- Name of the subnet whose interface should be attached to the router.
required: true
default: None
tenant_name:
description:
- Name of the tenant whose subnet has to be attached.
required: false
default: None
requirements: ["quantumclient", "keystoneclient"]
'''
EXAMPLES = '''
# Attach tenant1's subnet to the external router
- quantum_router_interface: state=present login_username=admin
login_password=admin
login_tenant_name=admin
tenant_name=tenant1
router_name=external_route
subnet_name=t1subnet
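# A hypothetical extra example (not part of the original docs): detach the subnet again
- quantum_router_interface: state=absent login_username=admin
login_password=admin
login_tenant_name=admin
tenant_name=tenant1
router_name=external_route
subnet_name=t1subnet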
'''
_os_keystone = None
_os_tenant_id = None
def _get_ksclient(module, kwargs):
try:
kclient = ksclient.Client(username=kwargs.get('login_username'),
password=kwargs.get('login_password'),
tenant_name=kwargs.get('login_tenant_name'),
auth_url=kwargs.get('auth_url'))
except Exception, e:
module.fail_json(msg = "Error authenticating to the keystone: %s " % e.message)
global _os_keystone
_os_keystone = kclient
return kclient
def _get_endpoint(module, ksclient):
try:
endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL')
except Exception, e:
module.fail_json(msg = "Error getting network endpoint: %s" % e.message)
return endpoint
def _get_neutron_client(module, kwargs):
_ksclient = _get_ksclient(module, kwargs)
token = _ksclient.auth_token
endpoint = _get_endpoint(module, _ksclient)
kwargs = {
'token': token,
'endpoint_url': endpoint
}
try:
neutron = client.Client('2.0', **kwargs)
except Exception, e:
module.fail_json(msg = "Error in connecting to neutron: %s " % e.message)
return neutron
def _set_tenant_id(module):
global _os_tenant_id
if not module.params['tenant_name']:
login_tenant_name = module.params['login_tenant_name']
else:
login_tenant_name = module.params['tenant_name']
for tenant in _os_keystone.tenants.list():
if tenant.name == login_tenant_name:
_os_tenant_id = tenant.id
break
if not _os_tenant_id:
module.fail_json(msg = "The tenant id cannot be found, please check the parameters")
def _get_router_id(module, neutron):
kwargs = {
'name': module.params['router_name'],
}
try:
routers = neutron.list_routers(**kwargs)
except Exception, e:
module.fail_json(msg = "Error in getting the router list: %s " % e.message)
if not routers['routers']:
return None
return routers['routers'][0]['id']
def _get_subnet_id(module, neutron):
subnet_id = None
kwargs = {
'tenant_id': _os_tenant_id,
'name': module.params['subnet_name'],
}
try:
subnets = neutron.list_subnets(**kwargs)
except Exception, e:
module.fail_json( msg = " Error in getting the subnet list:%s " % e.message)
if not subnets['subnets']:
return None
return subnets['subnets'][0]['id']
def _get_port_id(neutron, module, router_id, subnet_id):
kwargs = {
'tenant_id': _os_tenant_id,
'device_id': router_id,
}
try:
ports = neutron.list_ports(**kwargs)
except Exception, e:
module.fail_json( msg = "Error in listing ports: %s" % e.message)
if not ports['ports']:
return None
for port in ports['ports']:
for subnet in port['fixed_ips']:
if subnet['subnet_id'] == subnet_id:
return port['id']
return None
def _add_interface_router(neutron, module, router_id, subnet_id):
kwargs = {
'subnet_id': subnet_id
}
try:
neutron.add_interface_router(router_id, kwargs)
except Exception, e:
module.fail_json(msg = "Error in adding interface to router: %s" % e.message)
return True
def _remove_interface_router(neutron, module, router_id, subnet_id):
kwargs = {
'subnet_id': subnet_id
}
try:
neutron.remove_interface_router(router_id, kwargs)
except Exception, e:
module.fail_json(msg="Error in removing interface from router: %s" % e.message)
return True
def main():
argument_spec = openstack_argument_spec()
argument_spec.update(dict(
router_name = dict(required=True),
subnet_name = dict(required=True),
tenant_name = dict(default=None),
state = dict(default='present', choices=['absent', 'present']),
))
module = AnsibleModule(argument_spec=argument_spec)
neutron = _get_neutron_client(module, module.params)
_set_tenant_id(module)
router_id = _get_router_id(module, neutron)
if not router_id:
module.fail_json(msg="failed to get the router id, please check the router name")
subnet_id = _get_subnet_id(module, neutron)
if not subnet_id:
module.fail_json(msg="failed to get the subnet id, please check the subnet name")
if module.params['state'] == 'present':
port_id = _get_port_id(neutron, module, router_id, subnet_id)
if not port_id:
_add_interface_router(neutron, module, router_id, subnet_id)
port_id = _get_port_id(neutron, module, router_id, subnet_id)
module.exit_json(changed=True, result="created", id=port_id)
module.exit_json(changed=False, result="success", id=port_id)
if module.params['state'] == 'absent':
port_id = _get_port_id(neutron, module, router_id, subnet_id)
if not port_id:
module.exit_json(changed = False, result = "Success")
_remove_interface_router(neutron, module, router_id, subnet_id)
module.exit_json(changed=True, result="Deleted")
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
main()

@ -1,291 +0,0 @@
#!/usr/bin/python
#coding: utf-8 -*-
# (c) 2013, Benno Joy <benno@ansible.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
try:
from neutronclient.neutron import client
except ImportError:
from quantumclient.quantum import client
from keystoneclient.v2_0 import client as ksclient
except ImportError:
print("failed=True msg='quantumclient (or neutronclient) and keystoneclient are required'")
DOCUMENTATION = '''
---
module: quantum_subnet
version_added: "1.2"
short_description: Add/remove subnet from a network
description:
- Add/remove subnet from a network
options:
login_username:
description:
- login username to authenticate to keystone
required: true
default: admin
login_password:
description:
- Password of login user
required: true
default: True
login_tenant_name:
description:
- The tenant name of the login user
required: true
default: True
auth_url:
description:
- The keystone URL for authentication
required: false
default: 'http://127.0.0.1:35357/v2.0/'
region_name:
description:
- Name of the region
required: false
default: None
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
network_name:
description:
- Name of the network to which the subnet should be attached
required: true
default: None
name:
description:
- The name of the subnet that should be created
required: true
default: None
cidr:
description:
- The CIDR representation of the subnet that should be created
required: true
default: None
tenant_name:
description:
- The name of the tenant for whom the subnet should be created
required: false
default: None
ip_version:
description:
- The IP version of the subnet, 4 or 6
required: false
default: 4
enable_dhcp:
description:
- Whether DHCP should be enabled for this subnet.
required: false
default: true
gateway_ip:
description:
- The ip that would be assigned to the gateway for this subnet
required: false
default: None
dns_nameservers:
description:
- DNS nameservers for this subnet, comma-separated
required: false
default: None
version_added: "1.4"
allocation_pool_start:
description:
- The first address of the allocation pool from which IPs should be assigned
required: false
default: None
allocation_pool_end:
description:
- The last address of the allocation pool that should be assigned to the virtual machines
required: false
default: None
requirements: ["quantumclient", "neutronclient", "keystoneclient"]
'''
EXAMPLES = '''
# Create a subnet for a tenant with the specified subnet
- quantum_subnet: state=present login_username=admin login_password=admin
login_tenant_name=admin tenant_name=tenant1
network_name=network1 name=net1subnet cidr=192.168.0.0/24
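# A hypothetical extra example (not part of the original docs): remove the subnet again
- quantum_subnet: state=absent login_username=admin login_password=admin
login_tenant_name=admin tenant_name=tenant1
network_name=network1 name=net1subnet cidr=192.168.0.0/24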
'''
_os_keystone = None
_os_tenant_id = None
_os_network_id = None
def _get_ksclient(module, kwargs):
try:
kclient = ksclient.Client(username=kwargs.get('login_username'),
password=kwargs.get('login_password'),
tenant_name=kwargs.get('login_tenant_name'),
auth_url=kwargs.get('auth_url'))
except Exception, e:
module.fail_json(msg = "Error authenticating to the keystone: %s" %e.message)
global _os_keystone
_os_keystone = kclient
return kclient
def _get_endpoint(module, ksclient):
try:
endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL')
except Exception, e:
module.fail_json(msg = "Error getting network endpoint: %s" % e.message)
return endpoint
def _get_neutron_client(module, kwargs):
_ksclient = _get_ksclient(module, kwargs)
token = _ksclient.auth_token
endpoint = _get_endpoint(module, _ksclient)
kwargs = {
'token': token,
'endpoint_url': endpoint
}
try:
neutron = client.Client('2.0', **kwargs)
except Exception, e:
module.fail_json(msg = " Error in connecting to neutron: %s" % e.message)
return neutron
def _set_tenant_id(module):
global _os_tenant_id
if not module.params['tenant_name']:
tenant_name = module.params['login_tenant_name']
else:
tenant_name = module.params['tenant_name']
for tenant in _os_keystone.tenants.list():
if tenant.name == tenant_name:
_os_tenant_id = tenant.id
break
if not _os_tenant_id:
module.fail_json(msg = "The tenant id cannot be found, please check the parameters")
def _get_net_id(neutron, module):
kwargs = {
'tenant_id': _os_tenant_id,
'name': module.params['network_name'],
}
try:
networks = neutron.list_networks(**kwargs)
except Exception, e:
module.fail_json("Error in listing neutron networks: %s" % e.message)
if not networks['networks']:
return None
return networks['networks'][0]['id']
def _get_subnet_id(module, neutron):
global _os_network_id
subnet_id = None
_os_network_id = _get_net_id(neutron, module)
if not _os_network_id:
module.fail_json(msg = "network id of network not found.")
else:
kwargs = {
'tenant_id': _os_tenant_id,
'name': module.params['name'],
}
try:
subnets = neutron.list_subnets(**kwargs)
except Exception, e:
            module.fail_json(msg = "Error getting the subnet list: %s" % e.message)
if not subnets['subnets']:
return None
return subnets['subnets'][0]['id']
def _create_subnet(module, neutron):
neutron.format = 'json'
subnet = {
'name': module.params['name'],
'ip_version': module.params['ip_version'],
'enable_dhcp': module.params['enable_dhcp'],
'tenant_id': _os_tenant_id,
'gateway_ip': module.params['gateway_ip'],
'dns_nameservers': module.params['dns_nameservers'],
'network_id': _os_network_id,
'cidr': module.params['cidr'],
}
if module.params['allocation_pool_start'] and module.params['allocation_pool_end']:
allocation_pools = [
{
'start' : module.params['allocation_pool_start'],
'end' : module.params['allocation_pool_end']
}
]
subnet.update({'allocation_pools': allocation_pools})
if not module.params['gateway_ip']:
subnet.pop('gateway_ip')
if module.params['dns_nameservers']:
subnet['dns_nameservers'] = module.params['dns_nameservers'].split(',')
else:
subnet.pop('dns_nameservers')
try:
new_subnet = neutron.create_subnet(dict(subnet=subnet))
except Exception, e:
module.fail_json(msg = "Failure in creating subnet: %s" % e.message)
return new_subnet['subnet']['id']
def _delete_subnet(module, neutron, subnet_id):
try:
neutron.delete_subnet(subnet_id)
except Exception, e:
module.fail_json( msg = "Error in deleting subnet: %s" % e.message)
return True
def main():
argument_spec = openstack_argument_spec()
argument_spec.update(dict(
name = dict(required=True),
network_name = dict(required=True),
cidr = dict(required=True),
tenant_name = dict(default=None),
state = dict(default='present', choices=['absent', 'present']),
ip_version = dict(default='4', choices=['4', '6']),
enable_dhcp = dict(default='true', type='bool'),
gateway_ip = dict(default=None),
dns_nameservers = dict(default=None),
allocation_pool_start = dict(default=None),
allocation_pool_end = dict(default=None),
))
module = AnsibleModule(argument_spec=argument_spec)
neutron = _get_neutron_client(module, module.params)
_set_tenant_id(module)
if module.params['state'] == 'present':
subnet_id = _get_subnet_id(module, neutron)
if not subnet_id:
subnet_id = _create_subnet(module, neutron)
module.exit_json(changed = True, result = "Created" , id = subnet_id)
else:
module.exit_json(changed = False, result = "success" , id = subnet_id)
else:
subnet_id = _get_subnet_id(module, neutron)
if not subnet_id:
module.exit_json(changed = False, result = "success")
else:
_delete_subnet(module, neutron, subnet_id)
module.exit_json(changed = True, result = "deleted")
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
main()

@ -1,711 +0,0 @@
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
---
module: rax
short_description: create / delete an instance in Rackspace Public Cloud
description:
- creates / deletes a Rackspace Public Cloud instance and optionally
waits for it to be 'running'.
version_added: "1.2"
options:
auto_increment:
description:
- Whether or not to increment a single number with the name of the
created servers. Only applicable when used with the I(group) attribute
or meta key.
default: yes
choices:
- "yes"
- "no"
version_added: 1.5
config_drive:
description:
- Attach read-only configuration drive to server as label config-2
default: no
choices:
- "yes"
- "no"
version_added: 1.7
count:
description:
- number of instances to launch
default: 1
version_added: 1.4
count_offset:
description:
      - the number to start counting at
default: 1
version_added: 1.4
disk_config:
description:
- Disk partitioning strategy
choices:
- auto
- manual
version_added: '1.4'
default: auto
exact_count:
description:
- Explicitly ensure an exact count of instances, used with
state=active/present
default: no
choices:
- "yes"
- "no"
version_added: 1.4
extra_client_args:
description:
- A hash of key/value pairs to be used when creating the cloudservers
client. This is considered an advanced option, use it wisely and
with caution.
version_added: 1.6
extra_create_args:
description:
- A hash of key/value pairs to be used when creating a new server.
This is considered an advanced option, use it wisely and with caution.
version_added: 1.6
files:
description:
- Files to insert into the instance. remotefilename:localcontent
default: null
flavor:
description:
- flavor to use for the instance
default: null
group:
description:
      - host group to assign to server; also used for idempotent operations
        to ensure a specific number of instances
version_added: 1.4
image:
description:
- image to use for the instance. Can be an C(id), C(human_id) or C(name)
default: null
instance_ids:
description:
- list of instance ids, currently only used when state='absent' to
remove instances
version_added: 1.4
key_name:
description:
- key pair to use on the instance
default: null
aliases:
- keypair
meta:
description:
- A hash of metadata to associate with the instance
default: null
name:
description:
- Name to give the instance
default: null
networks:
description:
- The network to attach to the instances. If specified, you must include
ALL networks including the public and private interfaces. Can be C(id)
or C(label).
default:
- public
- private
version_added: 1.4
state:
description:
- Indicate desired state of the resource
choices:
- present
- absent
default: present
user_data:
description:
      - Data to be uploaded to the server's config drive. This option implies
        I(config_drive). Can be a file path or a string
version_added: 1.7
wait:
description:
- wait for the instance to be in state 'running' before returning
default: "no"
choices:
- "yes"
- "no"
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 300
author: Jesse Keating, Matt Martz
extends_documentation_fragment: rackspace.openstack
'''
EXAMPLES = '''
- name: Build a Cloud Server
gather_facts: False
tasks:
- name: Server build request
local_action:
module: rax
credentials: ~/.raxpub
name: rax-test1
flavor: 5
image: b11d9567-e412-4255-96b9-bd63ab23bcfe
key_name: my_rackspace_key
files:
/root/test.txt: /home/localuser/test.txt
wait: yes
state: present
networks:
- private
- public
register: rax
- name: Build an exact count of cloud servers with incremented names
hosts: local
gather_facts: False
tasks:
- name: Server build requests
local_action:
module: rax
credentials: ~/.raxpub
name: test%03d.example.org
flavor: performance1-1
image: ubuntu-1204-lts-precise-pangolin
state: present
count: 10
count_offset: 10
exact_count: yes
group: test
wait: yes
register: rax
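# A hypothetical scale-down of the group built above (illustrative sketch;
# with state=absent plus group, the module removes 'count' instances from
# the group rather than matching on name):
- name: Remove two cloud servers from the group
  hosts: local
  gather_facts: False
  tasks:
    - name: Server removal request
      local_action:
        module: rax
        credentials: ~/.raxpub
        name: test%03d.example.org
        flavor: performance1-1
        image: ubuntu-1204-lts-precise-pangolin
        state: absent
        count: 2
        group: test
        wait: yes
      register: rax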
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
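# Helper names used below (FINAL_STATUSES, rax_to_dict, rax_find_image,
# rax_find_network, rax_argument_spec, ...) are provided by
# ansible.module_utils.rax through the snippet imports at the bottom of
# this file.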
def create(module, names=[], flavor=None, image=None, meta={}, key_name=None,
files={}, wait=True, wait_timeout=300, disk_config=None,
group=None, nics=[], extra_create_args={}, user_data=None,
config_drive=False, existing=[]):
cs = pyrax.cloudservers
changed = False
if user_data:
config_drive = True
if user_data and os.path.isfile(user_data):
try:
f = open(user_data)
user_data = f.read()
f.close()
except Exception, e:
module.fail_json(msg='Failed to load %s' % user_data)
# Handle the file contents
for rpath in files.keys():
lpath = os.path.expanduser(files[rpath])
try:
fileobj = open(lpath, 'r')
files[rpath] = fileobj.read()
fileobj.close()
except Exception, e:
module.fail_json(msg='Failed to load %s' % lpath)
try:
servers = []
for name in names:
servers.append(cs.servers.create(name=name, image=image,
flavor=flavor, meta=meta,
key_name=key_name,
files=files, nics=nics,
disk_config=disk_config,
config_drive=config_drive,
userdata=user_data,
**extra_create_args))
except Exception, e:
module.fail_json(msg='%s' % e.message)
else:
changed = True
if wait:
end_time = time.time() + wait_timeout
infinite = wait_timeout == 0
while infinite or time.time() < end_time:
for server in servers:
try:
server.get()
except:
                        # the server is gone or unreadable; flag it as errored
                        server.status = 'ERROR'
if not filter(lambda s: s.status not in FINAL_STATUSES,
servers):
break
time.sleep(5)
success = []
error = []
timeout = []
for server in servers:
try:
server.get()
except:
            # the server is gone or unreadable; flag it as errored
            server.status = 'ERROR'
instance = rax_to_dict(server, 'server')
if server.status == 'ACTIVE' or not wait:
success.append(instance)
elif server.status == 'ERROR':
error.append(instance)
elif wait:
timeout.append(instance)
untouched = [rax_to_dict(s, 'server') for s in existing]
instances = success + untouched
results = {
'changed': changed,
'action': 'create',
'instances': instances,
'success': success,
'error': error,
'timeout': timeout,
'instance_ids': {
'instances': [i['id'] for i in instances],
'success': [i['id'] for i in success],
'error': [i['id'] for i in error],
'timeout': [i['id'] for i in timeout]
}
}
if timeout:
results['msg'] = 'Timeout waiting for all servers to build'
elif error:
results['msg'] = 'Failed to build all servers'
if 'msg' in results:
module.fail_json(**results)
else:
module.exit_json(**results)
def delete(module, instance_ids=[], wait=True, wait_timeout=300, kept=[]):
cs = pyrax.cloudservers
changed = False
instances = {}
servers = []
for instance_id in instance_ids:
servers.append(cs.servers.get(instance_id))
for server in servers:
try:
server.delete()
except Exception, e:
module.fail_json(msg=e.message)
else:
changed = True
instance = rax_to_dict(server, 'server')
instances[instance['id']] = instance
# If requested, wait for server deletion
if wait:
end_time = time.time() + wait_timeout
infinite = wait_timeout == 0
while infinite or time.time() < end_time:
for server in servers:
instance_id = server.id
try:
server.get()
except:
instances[instance_id]['status'] = 'DELETED'
instances[instance_id]['rax_status'] = 'DELETED'
if not filter(lambda s: s['status'] not in ('', 'DELETED',
'ERROR'),
instances.values()):
break
time.sleep(5)
timeout = filter(lambda s: s['status'] not in ('', 'DELETED', 'ERROR'),
instances.values())
error = filter(lambda s: s['status'] in ('ERROR'),
instances.values())
success = filter(lambda s: s['status'] in ('', 'DELETED'),
instances.values())
instances = [rax_to_dict(s, 'server') for s in kept]
results = {
'changed': changed,
'action': 'delete',
'instances': instances,
'success': success,
'error': error,
'timeout': timeout,
'instance_ids': {
'instances': [i['id'] for i in instances],
'success': [i['id'] for i in success],
'error': [i['id'] for i in error],
'timeout': [i['id'] for i in timeout]
}
}
if timeout:
results['msg'] = 'Timeout waiting for all servers to delete'
elif error:
results['msg'] = 'Failed to delete all servers'
if 'msg' in results:
module.fail_json(**results)
else:
module.exit_json(**results)
def cloudservers(module, state=None, name=None, flavor=None, image=None,
meta={}, key_name=None, files={}, wait=True, wait_timeout=300,
disk_config=None, count=1, group=None, instance_ids=[],
exact_count=False, networks=[], count_offset=0,
auto_increment=False, extra_create_args={}, user_data=None,
config_drive=False):
cs = pyrax.cloudservers
cnw = pyrax.cloud_networks
if not cnw:
module.fail_json(msg='Failed to instantiate client. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
servers = []
# Add the group meta key
if group and 'group' not in meta:
meta['group'] = group
elif 'group' in meta and group is None:
group = meta['group']
# Normalize and ensure all metadata values are strings
for k, v in meta.items():
if isinstance(v, list):
meta[k] = ','.join(['%s' % i for i in v])
elif isinstance(v, dict):
meta[k] = json.dumps(v)
elif not isinstance(v, basestring):
meta[k] = '%s' % v
# When using state=absent with group, the absent block won't match the
# names properly. Use the exact_count functionality to decrease the count
# to the desired level
was_absent = False
if group is not None and state == 'absent':
exact_count = True
state = 'present'
was_absent = True
if image:
image = rax_find_image(module, pyrax, image)
nics = []
if networks:
for network in networks:
nics.extend(rax_find_network(module, pyrax, network))
# act on the state
if state == 'present':
for arg, value in dict(name=name, flavor=flavor,
image=image).iteritems():
if not value:
module.fail_json(msg='%s is required for the "rax" module' %
arg)
        # Idempotently ensure a specific count of servers
if exact_count is not False:
# See if we can find servers that match our options
if group is None:
module.fail_json(msg='"group" must be provided when using '
'"exact_count"')
else:
if auto_increment:
numbers = set()
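                    # Probe whether 'name' already carries a printf-style
                    # format specifier; a plain string raises TypeError
                    # ("not all arguments converted"), in which case '%d'
                    # is appended so an index can be substituted in.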
try:
name % 0
except TypeError, e:
if e.message.startswith('not all'):
name = '%s%%d' % name
else:
module.fail_json(msg=e.message)
pattern = re.sub(r'%\d*[sd]', r'(\d+)', name)
for server in cs.servers.list():
if server.metadata.get('group') == group:
servers.append(server)
match = re.search(pattern, server.name)
if match:
number = int(match.group(1))
numbers.add(number)
number_range = xrange(count_offset, count_offset + count)
available_numbers = list(set(number_range)
.difference(numbers))
else:
for server in cs.servers.list():
if server.metadata.get('group') == group:
servers.append(server)
# If state was absent but the count was changed,
# assume we only wanted to remove that number of instances
if was_absent:
diff = len(servers) - count
if diff < 0:
count = 0
else:
count = diff
if len(servers) > count:
state = 'absent'
kept = servers[:count]
del servers[:count]
instance_ids = []
for server in servers:
instance_ids.append(server.id)
delete(module, instance_ids=instance_ids, wait=wait,
wait_timeout=wait_timeout, kept=kept)
elif len(servers) < count:
if auto_increment:
names = []
name_slice = count - len(servers)
numbers_to_use = available_numbers[:name_slice]
for number in numbers_to_use:
names.append(name % number)
else:
names = [name] * (count - len(servers))
else:
instances = []
instance_ids = []
for server in servers:
instances.append(rax_to_dict(server, 'server'))
instance_ids.append(server.id)
module.exit_json(changed=False, action=None,
instances=instances,
success=[], error=[], timeout=[],
instance_ids={'instances': instance_ids,
'success': [], 'error': [],
'timeout': []})
else:
if group is not None:
if auto_increment:
numbers = set()
try:
name % 0
except TypeError, e:
if e.message.startswith('not all'):
name = '%s%%d' % name
else:
module.fail_json(msg=e.message)
pattern = re.sub(r'%\d*[sd]', r'(\d+)', name)
for server in cs.servers.list():
if server.metadata.get('group') == group:
servers.append(server)
match = re.search(pattern, server.name)
if match:
number = int(match.group(1))
numbers.add(number)
number_range = xrange(count_offset,
count_offset + count + len(numbers))
available_numbers = list(set(number_range)
.difference(numbers))
names = []
numbers_to_use = available_numbers[:count]
for number in numbers_to_use:
names.append(name % number)
else:
names = [name] * count
else:
search_opts = {
'name': '^%s$' % name,
'image': image,
'flavor': flavor
}
servers = []
for server in cs.servers.list(search_opts=search_opts):
if server.metadata != meta:
continue
servers.append(server)
if len(servers) >= count:
instances = []
for server in servers:
instances.append(rax_to_dict(server, 'server'))
instance_ids = [i['id'] for i in instances]
module.exit_json(changed=False, action=None,
instances=instances, success=[], error=[],
timeout=[],
instance_ids={'instances': instance_ids,
'success': [], 'error': [],
'timeout': []})
names = [name] * (count - len(servers))
create(module, names=names, flavor=flavor, image=image,
meta=meta, key_name=key_name, files=files, wait=wait,
wait_timeout=wait_timeout, disk_config=disk_config, group=group,
nics=nics, extra_create_args=extra_create_args,
user_data=user_data, config_drive=config_drive,
existing=servers)
elif state == 'absent':
if instance_ids is None:
for arg, value in dict(name=name, flavor=flavor,
image=image).iteritems():
if not value:
module.fail_json(msg='%s is required for the "rax" '
'module' % arg)
search_opts = {
'name': '^%s$' % name,
'image': image,
'flavor': flavor
}
for server in cs.servers.list(search_opts=search_opts):
if meta != server.metadata:
continue
servers.append(server)
instance_ids = []
for server in servers:
if len(instance_ids) < count:
instance_ids.append(server.id)
else:
break
if not instance_ids:
module.exit_json(changed=False, action=None, instances=[],
success=[], error=[], timeout=[],
instance_ids={'instances': [],
'success': [], 'error': [],
'timeout': []})
delete(module, instance_ids=instance_ids, wait=wait,
wait_timeout=wait_timeout)
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
auto_increment=dict(default=True, type='bool'),
config_drive=dict(default=False, type='bool'),
count=dict(default=1, type='int'),
count_offset=dict(default=1, type='int'),
disk_config=dict(choices=['auto', 'manual']),
exact_count=dict(default=False, type='bool'),
extra_client_args=dict(type='dict', default={}),
extra_create_args=dict(type='dict', default={}),
files=dict(type='dict', default={}),
flavor=dict(),
group=dict(),
image=dict(),
instance_ids=dict(type='list'),
key_name=dict(aliases=['keypair']),
meta=dict(type='dict', default={}),
name=dict(),
networks=dict(type='list', default=['public', 'private']),
service=dict(),
state=dict(default='present', choices=['present', 'absent']),
user_data=dict(no_log=True),
wait=dict(default=False, type='bool'),
wait_timeout=dict(default=300),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together(),
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
service = module.params.get('service')
if service is not None:
module.fail_json(msg='The "service" attribute has been deprecated, '
'please remove "service: cloudservers" from your '
'playbook pertaining to the "rax" module')
auto_increment = module.params.get('auto_increment')
config_drive = module.params.get('config_drive')
count = module.params.get('count')
count_offset = module.params.get('count_offset')
disk_config = module.params.get('disk_config')
if disk_config:
disk_config = disk_config.upper()
exact_count = module.params.get('exact_count', False)
extra_client_args = module.params.get('extra_client_args')
extra_create_args = module.params.get('extra_create_args')
files = module.params.get('files')
flavor = module.params.get('flavor')
group = module.params.get('group')
image = module.params.get('image')
instance_ids = module.params.get('instance_ids')
key_name = module.params.get('key_name')
meta = module.params.get('meta')
name = module.params.get('name')
networks = module.params.get('networks')
state = module.params.get('state')
user_data = module.params.get('user_data')
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
setup_rax_module(module, pyrax)
if extra_client_args:
pyrax.cloudservers = pyrax.connect_to_cloudservers(
region=pyrax.cloudservers.client.region_name,
**extra_client_args)
client = pyrax.cloudservers.client
if 'bypass_url' in extra_client_args:
client.management_url = extra_client_args['bypass_url']
if pyrax.cloudservers is None:
module.fail_json(msg='Failed to instantiate client. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
cloudservers(module, state=state, name=name, flavor=flavor,
image=image, meta=meta, key_name=key_name, files=files,
wait=wait, wait_timeout=wait_timeout, disk_config=disk_config,
count=count, group=group, instance_ids=instance_ids,
exact_count=exact_count, networks=networks,
count_offset=count_offset, auto_increment=auto_increment,
extra_create_args=extra_create_args, user_data=user_data,
config_drive=config_drive)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
# invoke the module
main()

@ -1,220 +0,0 @@
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
---
module: rax_cbs
short_description: Manipulate Rackspace Cloud Block Storage Volumes
description:
- Manipulate Rackspace Cloud Block Storage Volumes
version_added: 1.6
options:
description:
description:
- Description to give the volume being created
default: null
meta:
description:
- A hash of metadata to associate with the volume
default: null
name:
description:
- Name to give the volume being created
default: null
required: true
size:
description:
      - Size of the volume to create in Gigabytes (minimum 100)
default: 100
required: true
snapshot_id:
description:
- The id of the snapshot to create the volume from
default: null
state:
description:
- Indicate desired state of the resource
choices:
- present
- absent
default: present
required: true
volume_type:
description:
- Type of the volume being created
choices:
- SATA
- SSD
default: SATA
required: true
wait:
description:
- wait for the volume to be in state 'available' before returning
default: "no"
choices:
- "yes"
- "no"
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 300
author: Christopher H. Laco, Matt Martz
extends_documentation_fragment: rackspace.openstack
'''
EXAMPLES = '''
- name: Build a Block Storage Volume
gather_facts: False
hosts: local
connection: local
tasks:
- name: Storage volume create request
local_action:
module: rax_cbs
credentials: ~/.raxpub
name: my-volume
description: My Volume
volume_type: SSD
size: 150
region: DFW
wait: yes
state: present
meta:
app: my-cool-app
register: my_volume
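# A hypothetical removal of the same volume (illustrative sketch; assumes
# the volume above exists and is not attached to a server):
- name: Delete a Block Storage Volume
  gather_facts: False
  hosts: local
  connection: local
  tasks:
    - name: Storage volume delete request
      local_action:
        module: rax_cbs
        credentials: ~/.raxpub
        name: my-volume
        region: DFW
        state: absent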
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
def cloud_block_storage(module, state, name, description, meta, size,
snapshot_id, volume_type, wait, wait_timeout):
for arg in (state, name, size, volume_type):
if not arg:
module.fail_json(msg='%s is required for rax_cbs' % arg)
if size < 100:
module.fail_json(msg='"size" must be greater than or equal to 100')
changed = False
volume = None
instance = {}
cbs = pyrax.cloud_blockstorage
if cbs is None:
module.fail_json(msg='Failed to instantiate client. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
volume = rax_find_volume(module, pyrax, name)
if state == 'present':
if not volume:
try:
volume = cbs.create(name, size=size, volume_type=volume_type,
description=description,
metadata=meta,
snapshot_id=snapshot_id)
changed = True
except Exception, e:
module.fail_json(msg='%s' % e.message)
else:
if wait:
attempts = wait_timeout / 5
pyrax.utils.wait_for_build(volume, interval=5,
attempts=attempts)
volume.get()
for key, value in vars(volume).iteritems():
if (isinstance(value, NON_CALLABLES) and
not key.startswith('_')):
instance[key] = value
result = dict(changed=changed, volume=instance)
if volume.status == 'error':
result['msg'] = '%s failed to build' % volume.id
elif wait and volume.status not in VOLUME_STATUS:
result['msg'] = 'Timeout waiting on %s' % volume.id
if 'msg' in result:
module.fail_json(**result)
else:
module.exit_json(**result)
elif state == 'absent':
if volume:
try:
volume.delete()
changed = True
except Exception, e:
module.fail_json(msg='%s' % e.message)
module.exit_json(changed=changed, volume=instance)
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
description=dict(),
meta=dict(type='dict', default={}),
name=dict(required=True),
size=dict(type='int', default=100),
snapshot_id=dict(),
state=dict(default='present', choices=['present', 'absent']),
volume_type=dict(choices=['SSD', 'SATA'], default='SATA'),
wait=dict(type='bool', default=False),
wait_timeout=dict(type='int', default=300)
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together()
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
description = module.params.get('description')
meta = module.params.get('meta')
name = module.params.get('name')
size = module.params.get('size')
snapshot_id = module.params.get('snapshot_id')
state = module.params.get('state')
volume_type = module.params.get('volume_type')
wait = module.params.get('wait')
wait_timeout = module.params.get('wait_timeout')
setup_rax_module(module, pyrax)
cloud_block_storage(module, state, name, description, meta, size,
snapshot_id, volume_type, wait, wait_timeout)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
### invoke the module
main()

@ -1,226 +0,0 @@
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
---
module: rax_cbs_attachments
short_description: Manipulate Rackspace Cloud Block Storage Volume Attachments
description:
- Manipulate Rackspace Cloud Block Storage Volume Attachments
version_added: 1.6
options:
device:
description:
- The device path to attach the volume to, e.g. /dev/xvde
default: null
required: true
volume:
description:
- Name or id of the volume to attach/detach
default: null
required: true
server:
description:
- Name or id of the server to attach/detach
default: null
required: true
state:
description:
- Indicate desired state of the resource
choices:
- present
- absent
default: present
required: true
wait:
description:
- wait for the volume to be in 'in-use'/'available' state before returning
default: "no"
choices:
- "yes"
- "no"
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 300
author: Christopher H. Laco, Matt Martz
extends_documentation_fragment: rackspace.openstack
'''
EXAMPLES = '''
- name: Attach a Block Storage Volume
gather_facts: False
hosts: local
connection: local
tasks:
- name: Storage volume attach request
local_action:
module: rax_cbs_attachments
credentials: ~/.raxpub
volume: my-volume
server: my-server
device: /dev/xvdd
region: DFW
wait: yes
state: present
register: my_volume
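# A hypothetical detach of the same volume (illustrative sketch; assumes
# the attachment created above exists):
- name: Detach a Block Storage Volume
  gather_facts: False
  hosts: local
  connection: local
  tasks:
    - name: Storage volume detach request
      local_action:
        module: rax_cbs_attachments
        credentials: ~/.raxpub
        volume: my-volume
        server: my-server
        device: /dev/xvdd
        region: DFW
        wait: yes
        state: absent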
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
def cloud_block_storage_attachments(module, state, volume, server, device,
wait, wait_timeout):
for arg in (state, volume, server, device):
if not arg:
module.fail_json(msg='%s is required for rax_cbs_attachments' %
arg)
cbs = pyrax.cloud_blockstorage
cs = pyrax.cloudservers
if cbs is None or cs is None:
module.fail_json(msg='Failed to instantiate client. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
changed = False
instance = {}
volume = rax_find_volume(module, pyrax, volume)
if not volume:
module.fail_json(msg='No matching storage volumes were found')
if state == 'present':
server = rax_find_server(module, pyrax, server)
if (volume.attachments and
volume.attachments[0]['server_id'] == server.id):
changed = False
elif volume.attachments:
module.fail_json(msg='Volume is attached to another server')
else:
try:
volume.attach_to_instance(server, mountpoint=device)
changed = True
except Exception, e:
module.fail_json(msg='%s' % e.message)
volume.get()
for key, value in vars(volume).iteritems():
if (isinstance(value, NON_CALLABLES) and
not key.startswith('_')):
instance[key] = value
result = dict(changed=changed, volume=instance)
if volume.status == 'error':
result['msg'] = '%s failed to build' % volume.id
elif wait:
attempts = wait_timeout / 5
pyrax.utils.wait_until(volume, 'status', 'in-use',
interval=5, attempts=attempts)
if 'msg' in result:
module.fail_json(**result)
else:
module.exit_json(**result)
elif state == 'absent':
server = rax_find_server(module, pyrax, server)
if (volume.attachments and
volume.attachments[0]['server_id'] == server.id):
try:
volume.detach()
if wait:
pyrax.utils.wait_until(volume, 'status', 'available',
interval=3, attempts=0,
verbose=False)
changed = True
except Exception, e:
module.fail_json(msg='%s' % e.message)
volume.get()
changed = True
elif volume.attachments:
module.fail_json(msg='Volume is attached to another server')
for key, value in vars(volume).iteritems():
if (isinstance(value, NON_CALLABLES) and
not key.startswith('_')):
instance[key] = value
result = dict(changed=changed, volume=instance)
if volume.status == 'error':
result['msg'] = '%s failed to build' % volume.id
if 'msg' in result:
module.fail_json(**result)
else:
module.exit_json(**result)
module.exit_json(changed=changed, volume=instance)
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
device=dict(required=True),
volume=dict(required=True),
server=dict(required=True),
state=dict(default='present', choices=['present', 'absent']),
wait=dict(type='bool', default=False),
wait_timeout=dict(type='int', default=300)
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together()
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
device = module.params.get('device')
volume = module.params.get('volume')
server = module.params.get('server')
state = module.params.get('state')
wait = module.params.get('wait')
wait_timeout = module.params.get('wait_timeout')
setup_rax_module(module, pyrax)
cloud_block_storage_attachments(module, state, volume, server, device,
wait, wait_timeout)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
### invoke the module
main()

@ -1,238 +0,0 @@
#!/usr/bin/python -tt
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
---
module: rax_cdb
short_description: create/delete or resize a Rackspace Cloud Databases instance
description:
  - creates / deletes or resizes a Rackspace Cloud Databases instance
and optionally waits for it to be 'running'. The name option needs to be
unique since it's used to identify the instance.
version_added: "1.8"
options:
name:
description:
- Name of the databases server instance
default: null
flavor:
description:
      - flavor to use for the instance, from 1 to 6 (i.e. 512MB to 16GB)
default: 1
volume:
description:
- Volume size of the database 1-150GB
default: 2
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
wait:
description:
- wait for the instance to be in state 'running' before returning
default: "no"
choices: [ "yes", "no" ]
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 300
author: Simon JAILLET
extends_documentation_fragment: rackspace
'''
EXAMPLES = '''
- name: Build a Cloud Databases instance
gather_facts: False
tasks:
- name: Server build request
local_action:
module: rax_cdb
credentials: ~/.raxpub
region: IAD
name: db-server1
flavor: 1
volume: 2
wait: yes
state: present
register: rax_db_server
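# A hypothetical resize of the same instance (illustrative sketch; the
# module resizes when the requested flavor or volume differs, and the new
# volume size must be larger than the current one):
- name: Resize a Cloud Databases instance
  gather_facts: False
  tasks:
    - name: Server resize request
      local_action:
        module: rax_cdb
        credentials: ~/.raxpub
        region: IAD
        name: db-server1
        flavor: 2
        volume: 4
        wait: yes
        state: present
      register: rax_db_server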
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
def find_instance(name):
cdb = pyrax.cloud_databases
instances = cdb.list()
if instances:
for instance in instances:
if instance.name == name:
return instance
return False
def save_instance(module, name, flavor, volume, wait, wait_timeout):
for arg, value in dict(name=name, flavor=flavor,
volume=volume).iteritems():
if not value:
module.fail_json(msg='%s is required for the "rax_cdb"'
' module' % arg)
if not (volume >= 1 and volume <= 150):
module.fail_json(msg='volume is required to be between 1 and 150')
cdb = pyrax.cloud_databases
flavors = []
for item in cdb.list_flavors():
flavors.append(item.id)
    if flavor not in flavors:
        module.fail_json(msg='nonexistent flavor reference "%s"' % str(flavor))
changed = False
instance = find_instance(name)
if not instance:
action = 'create'
try:
instance = cdb.create(name=name, flavor=flavor, volume=volume)
except Exception, e:
module.fail_json(msg='%s' % e.message)
else:
changed = True
else:
action = None
if instance.volume.size != volume:
action = 'resize'
if instance.volume.size > volume:
module.fail_json(changed=False, action=action,
msg='The new volume size must be larger than '
'the current volume size',
cdb=rax_to_dict(instance))
instance.resize_volume(volume)
changed = True
if int(instance.flavor.id) != flavor:
action = 'resize'
pyrax.utils.wait_until(instance, 'status', 'ACTIVE',
attempts=wait_timeout)
instance.resize(flavor)
changed = True
if wait:
pyrax.utils.wait_until(instance, 'status', 'ACTIVE',
attempts=wait_timeout)
if wait and instance.status != 'ACTIVE':
module.fail_json(changed=changed, action=action,
cdb=rax_to_dict(instance),
msg='Timeout waiting for "%s" databases instance to '
'be created' % name)
module.exit_json(changed=changed, action=action, cdb=rax_to_dict(instance))
def delete_instance(module, name, wait, wait_timeout):
if not name:
module.fail_json(msg='name is required for the "rax_cdb" module')
changed = False
instance = find_instance(name)
if not instance:
module.exit_json(changed=False, action='delete')
try:
instance.delete()
except Exception, e:
module.fail_json(msg='%s' % e.message)
else:
changed = True
if wait:
pyrax.utils.wait_until(instance, 'status', 'SHUTDOWN',
attempts=wait_timeout)
if wait and instance.status != 'SHUTDOWN':
module.fail_json(changed=changed, action='delete',
cdb=rax_to_dict(instance),
msg='Timeout waiting for "%s" databases instance to '
'be deleted' % name)
module.exit_json(changed=changed, action='delete',
cdb=rax_to_dict(instance))
def rax_cdb(module, state, name, flavor, volume, wait, wait_timeout):
# act on the state
if state == 'present':
save_instance(module, name, flavor, volume, wait, wait_timeout)
elif state == 'absent':
delete_instance(module, name, wait, wait_timeout)
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
name=dict(type='str', required=True),
flavor=dict(type='int', default=1),
volume=dict(type='int', default=2),
state=dict(default='present', choices=['present', 'absent']),
wait=dict(type='bool', default=False),
wait_timeout=dict(type='int', default=300),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together(),
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
name = module.params.get('name')
flavor = module.params.get('flavor')
volume = module.params.get('volume')
state = module.params.get('state')
wait = module.params.get('wait')
wait_timeout = module.params.get('wait_timeout')
setup_rax_module(module, pyrax)
rax_cdb(module, state, name, flavor, volume, wait, wait_timeout)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
# invoke the module
main()

@ -1,186 +0,0 @@
#!/usr/bin/python -tt
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
module: rax_cdb_database
short_description: 'create / delete a database in the Cloud Databases'
description:
- create / delete a database in the Cloud Databases.
version_added: "1.8"
options:
cdb_id:
description:
- The databases server UUID
default: null
name:
description:
- Name to give to the database
default: null
character_set:
description:
- Set of symbols and encodings
default: 'utf8'
collate:
description:
- Set of rules for comparing characters in a character set
default: 'utf8_general_ci'
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
author: Simon JAILLET
extends_documentation_fragment: rackspace
'''
EXAMPLES = '''
- name: Build a database in Cloud Databases
tasks:
- name: Database build request
local_action:
module: rax_cdb_database
credentials: ~/.raxpub
region: IAD
cdb_id: 323e7ce0-9cb0-11e3-a5e2-0800200c9a66
name: db1
state: present
register: rax_db_database
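# A hypothetical removal of the same database (illustrative sketch; assumes
# db1 exists on the instance above):
- name: Delete a database in Cloud Databases
  tasks:
    - name: Database delete request
      local_action:
        module: rax_cdb_database
        credentials: ~/.raxpub
        region: IAD
        cdb_id: 323e7ce0-9cb0-11e3-a5e2-0800200c9a66
        name: db1
        state: absent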
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
def find_database(instance, name):
try:
database = instance.get_database(name)
except Exception:
return False
return database
def save_database(module, cdb_id, name, character_set, collate):
for arg, value in dict(cdb_id=cdb_id, name=name).iteritems():
if not value:
module.fail_json(msg='%s is required for the "rax_cdb_database" '
'module' % arg)
cdb = pyrax.cloud_databases
try:
instance = cdb.get(cdb_id)
except Exception, e:
module.fail_json(msg='%s' % e.message)
changed = False
database = find_database(instance, name)
if not database:
try:
database = instance.create_database(name=name,
character_set=character_set,
collate=collate)
except Exception, e:
module.fail_json(msg='%s' % e.message)
else:
changed = True
module.exit_json(changed=changed, action='create',
database=rax_to_dict(database))
def delete_database(module, cdb_id, name):
for arg, value in dict(cdb_id=cdb_id, name=name).iteritems():
if not value:
module.fail_json(msg='%s is required for the "rax_cdb_database" '
'module' % arg)
cdb = pyrax.cloud_databases
try:
instance = cdb.get(cdb_id)
except Exception, e:
module.fail_json(msg='%s' % e.message)
changed = False
database = find_database(instance, name)
if database:
try:
database.delete()
except Exception, e:
module.fail_json(msg='%s' % e.message)
else:
changed = True
module.exit_json(changed=changed, action='delete')
def rax_cdb_database(module, state, cdb_id, name, character_set, collate):
# act on the state
if state == 'present':
save_database(module, cdb_id, name, character_set, collate)
elif state == 'absent':
delete_database(module, cdb_id, name)
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
cdb_id=dict(type='str', required=True),
name=dict(type='str', required=True),
character_set=dict(type='str', default='utf8'),
collate=dict(type='str', default='utf8_general_ci'),
state=dict(default='present', choices=['present', 'absent'])
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together(),
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
cdb_id = module.params.get('cdb_id')
name = module.params.get('name')
character_set = module.params.get('character_set')
collate = module.params.get('collate')
state = module.params.get('state')
setup_rax_module(module, pyrax)
rax_cdb_database(module, state, cdb_id, name, character_set, collate)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
# invoke the module
main()

@ -1,220 +0,0 @@
#!/usr/bin/python -tt
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
---
module: rax_cdb_user
short_description: create / delete a Rackspace Cloud Database user
description:
  - create / delete a user in the Cloud Databases.
version_added: "1.8"
options:
cdb_id:
description:
- The databases server UUID
default: null
db_username:
description:
- Name of the database user
default: null
db_password:
description:
- Database user password
default: null
databases:
description:
- Name of the databases that the user can access
default: []
host:
description:
- Specifies the host from which a user is allowed to connect to
the database. Possible values are a string containing an IPv4 address
or "%" to allow connecting from any host
default: '%'
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
author: Simon JAILLET
extends_documentation_fragment: rackspace
'''
EXAMPLES = '''
- name: Build a user in Cloud Databases
tasks:
- name: User build request
local_action:
module: rax_cdb_user
credentials: ~/.raxpub
region: IAD
cdb_id: 323e7ce0-9cb0-11e3-a5e2-0800200c9a66
db_username: user1
db_password: user1
databases: ['db1']
state: present
register: rax_db_user
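# A hypothetical removal of the same user (illustrative sketch; db_password
# is still required by the module's argument spec):
- name: Delete a user in Cloud Databases
  tasks:
    - name: User delete request
      local_action:
        module: rax_cdb_user
        credentials: ~/.raxpub
        region: IAD
        cdb_id: 323e7ce0-9cb0-11e3-a5e2-0800200c9a66
        db_username: user1
        db_password: user1
        state: absent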
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
def find_user(instance, name):
try:
user = instance.get_user(name)
except Exception:
return False
return user
def save_user(module, cdb_id, name, password, databases, host):
for arg, value in dict(cdb_id=cdb_id, name=name).iteritems():
if not value:
module.fail_json(msg='%s is required for the "rax_cdb_user" '
'module' % arg)
cdb = pyrax.cloud_databases
try:
instance = cdb.get(cdb_id)
except Exception, e:
module.fail_json(msg='%s' % e.message)
changed = False
user = find_user(instance, name)
if not user:
action = 'create'
try:
user = instance.create_user(name=name,
password=password,
database_names=databases,
host=host)
except Exception, e:
module.fail_json(msg='%s' % e.message)
else:
changed = True
else:
action = 'update'
if user.host != host:
changed = True
user.update(password=password, host=host)
former_dbs = set([item.name for item in user.list_user_access()])
databases = set(databases)
if databases != former_dbs:
try:
revoke_dbs = [db for db in former_dbs if db not in databases]
user.revoke_user_access(db_names=revoke_dbs)
new_dbs = [db for db in databases if db not in former_dbs]
user.grant_user_access(db_names=new_dbs)
except Exception, e:
module.fail_json(msg='%s' % e.message)
else:
changed = True
module.exit_json(changed=changed, action=action, user=rax_to_dict(user))
def delete_user(module, cdb_id, name):
for arg, value in dict(cdb_id=cdb_id, name=name).iteritems():
if not value:
module.fail_json(msg='%s is required for the "rax_cdb_user"'
' module' % arg)
cdb = pyrax.cloud_databases
try:
instance = cdb.get(cdb_id)
except Exception, e:
module.fail_json(msg='%s' % e.message)
changed = False
user = find_user(instance, name)
if user:
try:
user.delete()
except Exception, e:
module.fail_json(msg='%s' % e.message)
else:
changed = True
module.exit_json(changed=changed, action='delete')
def rax_cdb_user(module, state, cdb_id, name, password, databases, host):
# act on the state
if state == 'present':
save_user(module, cdb_id, name, password, databases, host)
elif state == 'absent':
delete_user(module, cdb_id, name)
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
cdb_id=dict(type='str', required=True),
db_username=dict(type='str', required=True),
db_password=dict(type='str', required=True, no_log=True),
databases=dict(type='list', default=[]),
host=dict(type='str', default='%'),
state=dict(default='present', choices=['present', 'absent'])
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together(),
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
cdb_id = module.params.get('cdb_id')
name = module.params.get('db_username')
password = module.params.get('db_password')
databases = module.params.get('databases')
host = unicode(module.params.get('host'))
state = module.params.get('state')
setup_rax_module(module, pyrax)
rax_cdb_user(module, state, cdb_id, name, password, databases, host)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
# invoke the module
main()

@ -1,303 +0,0 @@
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
---
module: rax_clb
short_description: create / delete a load balancer in Rackspace Public Cloud
description:
- creates / deletes a Rackspace Public Cloud load balancer.
version_added: "1.4"
options:
algorithm:
description:
- algorithm for the balancer being created
choices:
- RANDOM
- LEAST_CONNECTIONS
- ROUND_ROBIN
- WEIGHTED_LEAST_CONNECTIONS
- WEIGHTED_ROUND_ROBIN
default: LEAST_CONNECTIONS
meta:
description:
- A hash of metadata to associate with the instance
default: null
name:
description:
- Name to give the load balancer
default: null
port:
description:
- Port for the balancer being created
default: 80
protocol:
description:
- Protocol for the balancer being created
choices:
- DNS_TCP
- DNS_UDP
- FTP
- HTTP
- HTTPS
- IMAPS
- IMAPv4
- LDAP
- LDAPS
- MYSQL
- POP3
- POP3S
- SMTP
- TCP
- TCP_CLIENT_FIRST
- UDP
- UDP_STREAM
- SFTP
default: HTTP
state:
description:
- Indicate desired state of the resource
choices:
- present
- absent
default: present
timeout:
description:
- timeout for communication between the balancer and the node
default: 30
type:
description:
- type of interface for the balancer being created
choices:
- PUBLIC
- SERVICENET
default: PUBLIC
vip_id:
description:
- Virtual IP ID to use when creating the load balancer for purposes of
sharing an IP with another load balancer of another protocol
version_added: 1.5
wait:
description:
- wait for the balancer to be in state 'running' before returning
default: "no"
choices:
- "yes"
- "no"
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 300
author: Christopher H. Laco, Matt Martz
extends_documentation_fragment: rackspace
'''
EXAMPLES = '''
- name: Build a Load Balancer
gather_facts: False
hosts: local
connection: local
tasks:
- name: Load Balancer create request
local_action:
module: rax_clb
credentials: ~/.raxpub
name: my-lb
port: 8080
protocol: HTTP
type: SERVICENET
timeout: 30
region: DFW
wait: yes
state: present
meta:
app: my-cool-app
register: my_lb
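# A hypothetical teardown of the same balancer (illustrative sketch; assumes
# my-lb exists in DFW):
- name: Delete a Load Balancer
  gather_facts: False
  hosts: local
  connection: local
  tasks:
    - name: Load Balancer delete request
      local_action:
        module: rax_clb
        credentials: ~/.raxpub
        name: my-lb
        region: DFW
        wait: yes
        state: absent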
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
def cloud_load_balancer(module, state, name, meta, algorithm, port, protocol,
vip_type, timeout, wait, wait_timeout, vip_id):
for arg in (state, name, port, protocol, vip_type):
if not arg:
module.fail_json(msg='%s is required for rax_clb' % arg)
if int(timeout) < 30:
module.fail_json(msg='"timeout" must be greater than or equal to 30')
changed = False
balancers = []
clb = pyrax.cloud_loadbalancers
if not clb:
module.fail_json(msg='Failed to instantiate client. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
for balancer in clb.list():
if name != balancer.name and name != balancer.id:
continue
balancers.append(balancer)
if len(balancers) > 1:
module.fail_json(msg='Multiple Load Balancers were matched by name, '
'try using the Load Balancer ID instead')
if state == 'present':
        metadata = []
        if isinstance(meta, dict):
            metadata = [dict(key=k, value=v) for k, v in meta.items()]
if not balancers:
try:
virtual_ips = [clb.VirtualIP(type=vip_type, id=vip_id)]
balancer = clb.create(name, metadata=metadata, port=port,
algorithm=algorithm, protocol=protocol,
timeout=timeout, virtual_ips=virtual_ips)
changed = True
except Exception, e:
module.fail_json(msg='%s' % e.message)
else:
balancer = balancers[0]
setattr(balancer, 'metadata',
[dict(key=k, value=v) for k, v in
balancer.get_metadata().items()])
atts = {
'name': name,
'algorithm': algorithm,
'port': port,
'protocol': protocol,
'timeout': timeout
}
for att, value in atts.iteritems():
current = getattr(balancer, att)
if current != value:
changed = True
if changed:
balancer.update(**atts)
if balancer.metadata != metadata:
balancer.set_metadata(meta)
changed = True
virtual_ips = [clb.VirtualIP(type=vip_type)]
current_vip_types = set([v.type for v in balancer.virtual_ips])
vip_types = set([v.type for v in virtual_ips])
if current_vip_types != vip_types:
module.fail_json(msg='Load balancer Virtual IP type cannot '
'be changed')
if wait:
attempts = wait_timeout / 5
pyrax.utils.wait_for_build(balancer, interval=5, attempts=attempts)
balancer.get()
instance = rax_to_dict(balancer, 'clb')
result = dict(changed=changed, balancer=instance)
if balancer.status == 'ERROR':
result['msg'] = '%s failed to build' % balancer.id
elif wait and balancer.status not in ('ACTIVE', 'ERROR'):
result['msg'] = 'Timeout waiting on %s' % balancer.id
if 'msg' in result:
module.fail_json(**result)
else:
module.exit_json(**result)
elif state == 'absent':
if balancers:
balancer = balancers[0]
try:
balancer.delete()
changed = True
except Exception, e:
module.fail_json(msg='%s' % e.message)
instance = rax_to_dict(balancer, 'clb')
if wait:
attempts = wait_timeout / 5
                pyrax.utils.wait_until(balancer, 'status', 'DELETED',
interval=5, attempts=attempts)
else:
instance = {}
module.exit_json(changed=changed, balancer=instance)
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
algorithm=dict(choices=CLB_ALGORITHMS,
default='LEAST_CONNECTIONS'),
meta=dict(type='dict', default={}),
name=dict(),
port=dict(type='int', default=80),
protocol=dict(choices=CLB_PROTOCOLS, default='HTTP'),
state=dict(default='present', choices=['present', 'absent']),
timeout=dict(type='int', default=30),
type=dict(choices=['PUBLIC', 'SERVICENET'], default='PUBLIC'),
vip_id=dict(),
wait=dict(type='bool'),
wait_timeout=dict(default=300),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together(),
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
algorithm = module.params.get('algorithm')
meta = module.params.get('meta')
name = module.params.get('name')
port = module.params.get('port')
protocol = module.params.get('protocol')
state = module.params.get('state')
timeout = int(module.params.get('timeout'))
vip_id = module.params.get('vip_id')
vip_type = module.params.get('type')
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
setup_rax_module(module, pyrax)
cloud_load_balancer(module, state, name, meta, algorithm, port, protocol,
vip_type, timeout, wait, wait_timeout, vip_id)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
### invoke the module
main()

@ -1,303 +0,0 @@
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
---
module: rax_clb_nodes
short_description: add, modify and remove nodes from a Rackspace Cloud Load Balancer
description:
- Adds, modifies and removes nodes from a Rackspace Cloud Load Balancer
version_added: "1.4"
options:
address:
required: false
description:
- IP address or domain name of the node
condition:
required: false
choices:
- enabled
- disabled
- draining
description:
- Condition for the node, which determines its role within the load
balancer
load_balancer_id:
required: true
type: integer
description:
- Load balancer id
node_id:
required: false
type: integer
description:
- Node id
port:
required: false
type: integer
description:
- Port number of the load balanced service on the node
state:
required: false
default: "present"
choices:
- present
- absent
description:
- Indicate desired state of the node
type:
required: false
choices:
- primary
- secondary
description:
- Type of node
wait:
required: false
default: "no"
choices:
- "yes"
- "no"
description:
- Wait for the load balancer to become active before returning
wait_timeout:
required: false
type: integer
default: 30
description:
- How long to wait before giving up and returning an error
weight:
required: false
description:
- Weight of node
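virtualenv:
required: false
description:
- Path to a python virtualenv that will be activated (via its
bin/activate_this.py) before the module runs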
author: Lukasz Kawczynski
extends_documentation_fragment: rackspace
'''
EXAMPLES = '''
# Add a new node to the load balancer
- local_action:
module: rax_clb_nodes
load_balancer_id: 71
address: 10.2.2.3
port: 80
condition: enabled
type: primary
wait: yes
credentials: /path/to/credentials
# Drain connections from a node
- local_action:
module: rax_clb_nodes
load_balancer_id: 71
node_id: 410
condition: draining
wait: yes
credentials: /path/to/credentials
# Remove a node from the load balancer
- local_action:
module: rax_clb_nodes
load_balancer_id: 71
node_id: 410
state: absent
wait: yes
credentials: /path/to/credentials
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
def _activate_virtualenv(path):
path = os.path.expanduser(path)
activate_this = os.path.join(path, 'bin', 'activate_this.py')
execfile(activate_this, dict(__file__=activate_this))
def _get_node(lb, node_id=None, address=None, port=None):
"""Return a matching node"""
for node in getattr(lb, 'nodes', []):
match_list = []
if node_id is not None:
match_list.append(getattr(node, 'id', None) == node_id)
if address is not None:
match_list.append(getattr(node, 'address', None) == address)
if port is not None:
match_list.append(getattr(node, 'port', None) == port)
if match_list and all(match_list):
return node
return None
def _is_primary(node):
"""Return True if node is primary and enabled"""
return (node.type.lower() == 'primary' and
node.condition.lower() == 'enabled')
def _get_primary_nodes(lb):
"""Return a list of primary and enabled nodes"""
nodes = []
for node in lb.nodes:
if _is_primary(node):
nodes.append(node)
return nodes
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
address=dict(),
condition=dict(choices=['enabled', 'disabled', 'draining']),
load_balancer_id=dict(required=True, type='int'),
node_id=dict(type='int'),
port=dict(type='int'),
state=dict(default='present', choices=['present', 'absent']),
type=dict(choices=['primary', 'secondary']),
virtualenv=dict(),
wait=dict(default=False, type='bool'),
wait_timeout=dict(default=30, type='int'),
weight=dict(type='int'),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together(),
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
address = module.params['address']
condition = (module.params['condition'] and
module.params['condition'].upper())
load_balancer_id = module.params['load_balancer_id']
node_id = module.params['node_id']
port = module.params['port']
state = module.params['state']
typ = module.params['type'] and module.params['type'].upper()
virtualenv = module.params['virtualenv']
wait = module.params['wait']
wait_timeout = module.params['wait_timeout'] or 1
weight = module.params['weight']
if virtualenv:
try:
_activate_virtualenv(virtualenv)
except IOError, e:
module.fail_json(msg='Failed to activate virtualenv %s (%s)' % (
virtualenv, e))
setup_rax_module(module, pyrax)
if not pyrax.cloud_loadbalancers:
module.fail_json(msg='Failed to instantiate client. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
try:
lb = pyrax.cloud_loadbalancers.get(load_balancer_id)
except pyrax.exc.PyraxException, e:
module.fail_json(msg='%s' % e.message)
node = _get_node(lb, node_id, address, port)
result = rax_clb_node_to_dict(node)
if state == 'absent':
if not node: # Removing a non-existent node
module.exit_json(changed=False, state=state)
# The API detects this as well but currently pyrax does not return a
# meaningful error message
if _is_primary(node) and len(_get_primary_nodes(lb)) == 1:
module.fail_json(
msg='At least one primary node has to be enabled')
try:
lb.delete_node(node)
result = {}
except pyrax.exc.NotFound:
module.exit_json(changed=False, state=state)
except pyrax.exc.PyraxException, e:
module.fail_json(msg='%s' % e.message)
else: # present
if not node:
if node_id: # Updating a non-existent node
msg = 'Node %d not found' % node_id
if lb.nodes:
msg += (' (available nodes: %s)' %
', '.join([str(x.id) for x in lb.nodes]))
module.fail_json(msg=msg)
else: # Creating a new node
try:
node = pyrax.cloudloadbalancers.Node(
address=address, port=port, condition=condition,
weight=weight, type=typ)
resp, body = lb.add_nodes([node])
result.update(body['nodes'][0])
except pyrax.exc.PyraxException, e:
module.fail_json(msg='%s' % e.message)
else: # Updating an existing node
mutable = {
'condition': condition,
'type': typ,
'weight': weight,
}
for name, value in mutable.items():
if value is None or value == getattr(node, name):
mutable.pop(name)
if not mutable:
module.exit_json(changed=False, state=state, node=result)
try:
# The diff has to be set explicitly to update node's weight and
# type; this should probably be fixed in pyrax
lb.update_node(node, diff=mutable)
result.update(mutable)
except pyrax.exc.PyraxException, e:
module.fail_json(msg='%s' % e.message)
if wait:
pyrax.utils.wait_until(lb, "status", "ACTIVE", interval=1,
attempts=wait_timeout)
if lb.status != 'ACTIVE':
module.fail_json(
msg='Load balancer not active after %ds (current status: %s)' %
(wait_timeout, lb.status.lower()))
kwargs = {'node': result} if result else {}
module.exit_json(changed=True, state=state, **kwargs)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
### invoke the module
main()
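
The update path above only sends the attributes that actually differ from the
live node. The same diff-building pattern as a self-contained sketch (this
Node class is a stand-in for pyrax's, used only for illustration):

class Node(object):
    def __init__(self, condition, typ, weight):
        self.condition = condition
        self.type = typ
        self.weight = weight

def build_diff(node, condition=None, typ=None, weight=None):
    # Keep only the attributes that were requested and differ from the node.
    desired = {'condition': condition, 'type': typ, 'weight': weight}
    return dict((k, v) for k, v in desired.items()
                if v is not None and v != getattr(node, k))

node = Node('ENABLED', 'PRIMARY', 1)
assert build_diff(node, condition='ENABLED') == {}  # unchanged -> no API call
assert build_diff(node, weight=5) == {'weight': 5}  # only the real change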

@ -1,173 +0,0 @@
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
---
module: rax_dns
short_description: Manage domains on Rackspace Cloud DNS
description:
- Manage domains on Rackspace Cloud DNS
version_added: 1.5
options:
comment:
description:
- Brief description of the domain. Maximum length of 160 characters
email:
description:
- Email address of the domain administrator
name:
description:
- Domain name to create
state:
description:
- Indicate desired state of the resource
choices:
- present
- absent
default: present
ttl:
description:
- Time to live of domain in seconds
default: 3600
notes:
- "It is recommended that plays utilizing this module be run with
C(serial: 1) to avoid exceeding the API request limit imposed by
the Rackspace CloudDNS API"
author: Matt Martz
extends_documentation_fragment: rackspace
'''
EXAMPLES = '''
- name: Create domain
hosts: all
gather_facts: False
tasks:
- name: Domain create request
local_action:
module: rax_dns
credentials: ~/.raxpub
name: example.org
email: admin@example.org
register: rax_dns
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
def rax_dns(module, comment, email, name, state, ttl):
changed = False
dns = pyrax.cloud_dns
if not dns:
module.fail_json(msg='Failed to instantiate client. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
if state == 'present':
if not email:
module.fail_json(msg='An "email" attribute is required for '
'creating a domain')
try:
domain = dns.find(name=name)
except pyrax.exceptions.NoUniqueMatch, e:
module.fail_json(msg='%s' % e.message)
except pyrax.exceptions.NotFound:
try:
domain = dns.create(name=name, emailAddress=email, ttl=ttl,
comment=comment)
changed = True
except Exception, e:
module.fail_json(msg='%s' % e.message)
update = {}
if comment != getattr(domain, 'comment', None):
update['comment'] = comment
if ttl != getattr(domain, 'ttl', None):
update['ttl'] = ttl
if email != getattr(domain, 'emailAddress', None):
update['emailAddress'] = email
if update:
try:
domain.update(**update)
changed = True
domain.get()
except Exception, e:
module.fail_json(msg='%s' % e.message)
elif state == 'absent':
try:
domain = dns.find(name=name)
except pyrax.exceptions.NotFound:
domain = {}
except Exception, e:
module.fail_json(msg='%s' % e.message)
if domain:
try:
domain.delete()
changed = True
except Exception, e:
module.fail_json(msg='%s' % e.message)
module.exit_json(changed=changed, domain=rax_to_dict(domain))
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
comment=dict(),
email=dict(),
name=dict(),
state=dict(default='present', choices=['present', 'absent']),
ttl=dict(type='int', default=3600),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together(),
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
comment = module.params.get('comment')
email = module.params.get('email')
name = module.params.get('name')
state = module.params.get('state')
ttl = module.params.get('ttl')
setup_rax_module(module, pyrax, False)
rax_dns(module, comment, email, name, state, ttl)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
### invoke the module
main()
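
The present-state logic above is a find-or-create: look the domain up first
and create it only on NotFound. A hedged sketch of just that flow, assuming an
authenticated pyrax.cloud_dns client and eliding the module's error handling:

import pyrax

def ensure_domain(dns, name, email, ttl=3600, comment=None):
    # Illustrative: returns (domain, changed), reusing an existing domain.
    try:
        return dns.find(name=name), False
    except pyrax.exceptions.NotFound:
        domain = dns.create(name=name, emailAddress=email, ttl=ttl,
                            comment=comment)
        return domain, True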

@ -1,335 +0,0 @@
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
---
module: rax_dns_record
short_description: Manage DNS records on Rackspace Cloud DNS
description:
- Manage DNS records on Rackspace Cloud DNS
version_added: 1.5
options:
comment:
description:
- Brief description of the domain. Maximum length of 160 characters
data:
description:
- IP address for A/AAAA record, FQDN for CNAME/MX/NS, or text data for
SRV/TXT
required: True
domain:
description:
- Domain name to create the record in. This is an invalid option when
type=PTR
loadbalancer:
description:
- Load Balancer ID to create a PTR record for. Only used with type=PTR
version_added: 1.7
name:
description:
- FQDN record name to create
required: True
priority:
description:
- Required for MX and SRV records, but forbidden for other record types.
If specified, must be an integer from 0 to 65535.
server:
description:
- Server ID to create a PTR record for. Only used with type=PTR
version_added: 1.7
state:
description:
- Indicate desired state of the resource
choices:
- present
- absent
default: present
ttl:
description:
- Time to live of record in seconds
default: 3600
type:
description:
- DNS record type
choices:
- A
- AAAA
- CNAME
- MX
- NS
- SRV
- TXT
- PTR
required: true
notes:
- "It is recommended that plays utilizing this module be run with
C(serial: 1) to avoid exceeding the API request limit imposed by
the Rackspace CloudDNS API"
- To manipulate a C(PTR) record either C(loadbalancer) or C(server) must be
supplied
- As of version 1.7, the C(type) field is required and no longer defaults to an C(A) record.
- C(PTR) record support was added in version 1.7
author: Matt Martz
extends_documentation_fragment: rackspace
'''
EXAMPLES = '''
- name: Create DNS Records
hosts: all
gather_facts: False
tasks:
- name: Create A record
local_action:
module: rax_dns_record
credentials: ~/.raxpub
domain: example.org
name: www.example.org
data: "{{ rax_accessipv4 }}"
type: A
register: a_record
- name: Create PTR record
local_action:
module: rax_dns_record
credentials: ~/.raxpub
server: "{{ rax_id }}"
name: "{{ inventory_hostname }}"
region: DFW
register: ptr_record
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
def rax_dns_record_ptr(module, data=None, comment=None, loadbalancer=None,
name=None, server=None, state='present', ttl=7200):
changed = False
results = []
dns = pyrax.cloud_dns
if not dns:
module.fail_json(msg='Failed to instantiate client. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
if loadbalancer:
item = rax_find_loadbalancer(module, pyrax, loadbalancer)
elif server:
item = rax_find_server(module, pyrax, server)
if state == 'present':
current = dns.list_ptr_records(item)
for record in current:
if record.data == data:
if record.ttl != ttl or record.name != name:
try:
dns.update_ptr_record(item, record, name, data, ttl)
changed = True
except Exception, e:
module.fail_json(msg='%s' % e.message)
record.ttl = ttl
record.name = name
results.append(rax_to_dict(record))
break
else:
results.append(rax_to_dict(record))
break
if not results:
record = dict(name=name, type='PTR', data=data, ttl=ttl,
comment=comment)
try:
results = dns.add_ptr_records(item, [record])
changed = True
except Exception, e:
module.fail_json(msg='%s' % e.message)
module.exit_json(changed=changed, records=results)
elif state == 'absent':
current = dns.list_ptr_records(item)
for record in current:
if record.data == data:
results.append(rax_to_dict(record))
break
if results:
try:
dns.delete_ptr_records(item, data)
changed = True
except Exception, e:
module.fail_json(msg='%s' % e.message)
module.exit_json(changed=changed, records=results)
def rax_dns_record(module, comment=None, data=None, domain=None, name=None,
priority=None, record_type='A', state='present', ttl=7200):
"""Function for manipulating record types other than PTR"""
changed = False
dns = pyrax.cloud_dns
if not dns:
module.fail_json(msg='Failed to instantiate client. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
if state == 'present':
if not priority and record_type in ['MX', 'SRV']:
module.fail_json(msg='A "priority" attribute is required for '
'creating a MX or SRV record')
try:
domain = dns.find(name=domain)
except Exception, e:
module.fail_json(msg='%s' % e.message)
try:
record = domain.find_record(record_type, name=name)
except pyrax.exceptions.DomainRecordNotUnique, e:
module.fail_json(msg='%s' % e.message)
except pyrax.exceptions.DomainRecordNotFound, e:
try:
record_data = {
'type': record_type,
'name': name,
'data': data,
'ttl': ttl
}
if comment:
record_data.update(dict(comment=comment))
if priority and record_type.upper() in ['MX', 'SRV']:
record_data.update(dict(priority=priority))
record = domain.add_records([record_data])[0]
changed = True
except Exception, e:
module.fail_json(msg='%s' % e.message)
update = {}
if comment != getattr(record, 'comment', None):
update['comment'] = comment
if ttl != getattr(record, 'ttl', None):
update['ttl'] = ttl
if priority != getattr(record, 'priority', None):
update['priority'] = priority
if data != getattr(record, 'data', None):
update['data'] = data
if update:
try:
record.update(**update)
changed = True
record.get()
except Exception, e:
module.fail_json(msg='%s' % e.message)
elif state == 'absent':
try:
domain = dns.find(name=domain)
except Exception, e:
module.fail_json(msg='%s' % e.message)
try:
record = domain.find_record(record_type, name=name, data=data)
except pyrax.exceptions.DomainRecordNotFound, e:
record = {}
except pyrax.exceptions.DomainRecordNotUnique, e:
module.fail_json(msg='%s' % e.message)
if record:
try:
record.delete()
changed = True
except Exception, e:
module.fail_json(msg='%s' % e.message)
module.exit_json(changed=changed, record=rax_to_dict(record))
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
comment=dict(),
data=dict(required=True),
domain=dict(),
loadbalancer=dict(),
name=dict(required=True),
priority=dict(type='int'),
server=dict(),
state=dict(default='present', choices=['present', 'absent']),
ttl=dict(type='int', default=3600),
type=dict(required=True, choices=['A', 'AAAA', 'CNAME', 'MX', 'NS',
'SRV', 'TXT', 'PTR'])
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together(),
mutually_exclusive=[
['server', 'loadbalancer', 'domain'],
],
required_one_of=[
['server', 'loadbalancer', 'domain'],
],
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
comment = module.params.get('comment')
data = module.params.get('data')
domain = module.params.get('domain')
loadbalancer = module.params.get('loadbalancer')
name = module.params.get('name')
priority = module.params.get('priority')
server = module.params.get('server')
state = module.params.get('state')
ttl = module.params.get('ttl')
record_type = module.params.get('type')
setup_rax_module(module, pyrax, False)
if record_type.upper() == 'PTR':
if not server and not loadbalancer:
module.fail_json(msg='one of the following is required: '
'server,loadbalancer')
rax_dns_record_ptr(module, data=data, comment=comment,
loadbalancer=loadbalancer, name=name, server=server,
state=state, ttl=ttl)
else:
rax_dns_record(module, comment=comment, data=data, domain=domain,
name=name, priority=priority, record_type=record_type,
state=state, ttl=ttl)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
### invoke the module
main()
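
main() above routes PTR records down a separate path because they attach to a
server or load balancer rather than a domain. The dispatch, reduced to a
sketch (the returned labels are illustrative, not module output):

def dispatch(record_type, server=None, loadbalancer=None):
    if record_type.upper() == 'PTR':
        if not (server or loadbalancer):
            raise ValueError('one of the following is required: '
                             'server,loadbalancer')
        return 'ptr'
    return 'standard'

assert dispatch('PTR', server='abc123') == 'ptr'
assert dispatch('A') == 'standard'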

@ -1,144 +0,0 @@
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
---
module: rax_facts
short_description: Gather facts for Rackspace Cloud Servers
description:
- Gather facts for Rackspace Cloud Servers.
version_added: "1.4"
options:
address:
description:
- Server IP address to retrieve facts for; will match any IP assigned to
the server
id:
description:
- Server ID to retrieve facts for
name:
description:
- Server name to retrieve facts for
default: null
author: Matt Martz
extends_documentation_fragment: rackspace.openstack
'''
EXAMPLES = '''
- name: Gather info about servers
hosts: all
gather_facts: False
tasks:
- name: Get facts about servers
local_action:
module: rax_facts
credentials: ~/.raxpub
name: "{{ inventory_hostname }}"
region: DFW
- name: Map some facts
set_fact:
ansible_ssh_host: "{{ rax_accessipv4 }}"
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
def rax_facts(module, address, name, server_id):
changed = False
cs = pyrax.cloudservers
if cs is None:
module.fail_json(msg='Failed to instantiate client. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
ansible_facts = {}
search_opts = {}
if name:
search_opts = dict(name='^%s$' % name)
try:
servers = cs.servers.list(search_opts=search_opts)
except Exception, e:
module.fail_json(msg='%s' % e.message)
elif address:
servers = []
try:
for server in cs.servers.list():
for addresses in server.networks.values():
if address in addresses:
servers.append(server)
break
except Exception, e:
module.fail_json(msg='%s' % e.message)
elif server_id:
servers = []
try:
servers.append(cs.servers.get(server_id))
except Exception:
pass
if len(servers) > 1:
module.fail_json(msg='Multiple servers found matching provided '
'search parameters')
elif len(servers) == 1:
ansible_facts = rax_to_dict(servers[0], 'server')
module.exit_json(changed=changed, ansible_facts=ansible_facts)
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
address=dict(),
id=dict(),
name=dict(),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together(),
mutually_exclusive=[['address', 'id', 'name']],
required_one_of=[['address', 'id', 'name']],
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
address = module.params.get('address')
server_id = module.params.get('id')
name = module.params.get('name')
setup_rax_module(module, pyrax)
rax_facts(module, address, name, server_id)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
### invoke the module
main()
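
The name lookup above wraps the value in '^...$' because the Cloud Servers
name filter is evaluated as a regular expression rather than a literal string;
unanchored, 'web' would also match 'web1' and 'web-staging'. A quick
standalone illustration of the difference:

import re

# Illustrative names only.
names = ['web', 'web1', 'web-staging']
assert [n for n in names if re.search('web', n)] == names        # substring
assert [n for n in names if re.search('^web$', n)] == ['web']    # exact match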

@ -1,379 +0,0 @@
#!/usr/bin/python
# (c) 2013, Paul Durivage <paul.durivage@rackspace.com>
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
---
module: rax_files
short_description: Manipulate Rackspace Cloud Files Containers
description:
- Manipulate Rackspace Cloud Files Containers
version_added: "1.5"
options:
clear_meta:
description:
- Optionally clear existing metadata when applying metadata to existing containers.
Selecting this option is only appropriate when setting type=meta
choices:
- "yes"
- "no"
default: "no"
container:
description:
- The container to use for container or metadata operations.
required: true
meta:
description:
- A hash of items to set as metadata values on a container
private:
description:
- Used to set a container as private, removing it from the CDN. B(Warning!)
Private containers, if previously made public, can have live objects
available until the TTL on cached objects expires
public:
description:
- Used to set a container as public, available via the Cloud Files CDN
region:
description:
- Region to create an instance in
default: DFW
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent', 'list']
default: present
ttl:
description:
- In seconds, set a container-wide TTL for all objects cached on CDN edge nodes.
Setting a TTL is only appropriate for containers that are public
type:
description:
- Type of object to do work on, i.e. metadata object or a container object
choices:
- container
- meta
default: container
web_error:
description:
- Sets an object to be presented as the HTTP error page when accessed by the CDN URL
web_index:
description:
- Sets an object to be presented as the HTTP index page when accessed by the CDN URL
author: Paul Durivage
extends_documentation_fragment: rackspace
'''
EXAMPLES = '''
- name: "Test Cloud Files Containers"
hosts: local
gather_facts: no
tasks:
- name: "List all containers"
rax_files: state=list
- name: "Create container called 'mycontainer'"
rax_files: container=mycontainer
- name: "Create container 'mycontainer2' with metadata"
rax_files:
container: mycontainer2
meta:
key: value
file_for: someuser@example.com
- name: "Set a container's web index page"
rax_files: container=mycontainer web_index=index.html
- name: "Set a container's web error page"
rax_files: container=mycontainer web_error=error.html
- name: "Make container public"
rax_files: container=mycontainer public=yes
- name: "Make container public with a 24 hour TTL"
rax_files: container=mycontainer public=yes ttl=86400
- name: "Make container private"
rax_files: container=mycontainer private=yes
- name: "Test Cloud Files Containers Metadata Storage"
hosts: local
gather_facts: no
tasks:
- name: "Get mycontainer2 metadata"
rax_files:
container: mycontainer2
type: meta
- name: "Set mycontainer2 metadata"
rax_files:
container: mycontainer2
type: meta
meta:
uploaded_by: someuser@example.com
- name: "Remove mycontainer2 metadata"
rax_files:
container: "mycontainer2"
type: meta
state: absent
meta:
key: ""
file_for: ""
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError, e:
HAS_PYRAX = False
EXIT_DICT = dict(success=True)
META_PREFIX = 'x-container-meta-'
def _get_container(module, cf, container):
try:
return cf.get_container(container)
except pyrax.exc.NoSuchContainer, e:
module.fail_json(msg=e.message)
def _fetch_meta(module, container):
EXIT_DICT['meta'] = dict()
try:
for k, v in container.get_metadata().items():
split_key = k.split(META_PREFIX)[-1]
EXIT_DICT['meta'][split_key] = v
except Exception, e:
module.fail_json(msg=e.message)
def meta(cf, module, container_, state, meta_, clear_meta):
c = _get_container(module, cf, container_)
if meta_ and state == 'present':
try:
meta_set = c.set_metadata(meta_, clear=clear_meta)
except Exception, e:
module.fail_json(msg=e.message)
elif meta_ and state == 'absent':
remove_results = []
for k, v in meta_.items():
c.remove_metadata_key(k)
remove_results.append(k)
EXIT_DICT['deleted_meta_keys'] = remove_results
elif state == 'absent':
remove_results = []
for k, v in c.get_metadata().items():
c.remove_metadata_key(k)
remove_results.append(k)
EXIT_DICT['deleted_meta_keys'] = remove_results
_fetch_meta(module, c)
_locals = locals().keys()
EXIT_DICT['container'] = c.name
if 'meta_set' in _locals or 'remove_results' in _locals:
EXIT_DICT['changed'] = True
module.exit_json(**EXIT_DICT)
def container(cf, module, container_, state, meta_, clear_meta, ttl, public,
private, web_index, web_error):
if public and private:
module.fail_json(msg='container cannot be simultaneously '
'set to public and private')
if state == 'absent' and (meta_ or clear_meta or public or private or web_index or web_error):
module.fail_json(msg='attributes cannot be set or removed '
'when state is absent')
if state == 'list':
# We don't care if attributes are specified, let's list containers
EXIT_DICT['containers'] = cf.list_containers()
module.exit_json(**EXIT_DICT)
try:
c = cf.get_container(container_)
except pyrax.exc.NoSuchContainer, e:
# Make the container if state=present, otherwise bomb out
if state == 'present':
try:
c = cf.create_container(container_)
except Exception, e:
module.fail_json(msg=e.message)
else:
EXIT_DICT['changed'] = True
EXIT_DICT['created'] = True
else:
module.fail_json(msg=e.message)
else:
# Successfully grabbed a container object
# Delete if state is absent
if state == 'absent':
try:
cont_deleted = c.delete()
except Exception, e:
module.fail_json(msg=e.message)
else:
EXIT_DICT['deleted'] = True
if meta_:
try:
meta_set = c.set_metadata(meta_, clear=clear_meta)
except Exception, e:
module.fail_json(msg=e.message)
finally:
_fetch_meta(module, c)
if ttl:
try:
c.cdn_ttl = ttl
except Exception, e:
module.fail_json(msg=e.message)
else:
EXIT_DICT['ttl'] = c.cdn_ttl
if public:
try:
cont_public = c.make_public()
except Exception, e:
module.fail_json(msg=e.message)
else:
EXIT_DICT['container_urls'] = dict(url=c.cdn_uri,
ssl_url=c.cdn_ssl_uri,
streaming_url=c.cdn_streaming_uri,
ios_uri=c.cdn_ios_uri)
if private:
try:
cont_private = c.make_private()
except Exception, e:
module.fail_json(msg=e.message)
else:
EXIT_DICT['set_private'] = True
if web_index:
try:
cont_web_index = c.set_web_index_page(web_index)
except Exception, e:
module.fail_json(msg=e.message)
else:
EXIT_DICT['set_index'] = True
finally:
_fetch_meta(module, c)
if web_error:
try:
cont_err_index = c.set_web_error_page(web_error)
except Exception, e:
module.fail_json(msg=e.message)
else:
EXIT_DICT['set_error'] = True
finally:
_fetch_meta(module, c)
EXIT_DICT['container'] = c.name
EXIT_DICT['objs_in_container'] = c.object_count
EXIT_DICT['total_bytes'] = c.total_bytes
_locals = locals().keys()
if ('cont_deleted' in _locals
or 'meta_set' in _locals
or 'cont_public' in _locals
or 'cont_private' in _locals
or 'cont_web_index' in _locals
or 'cont_err_index' in _locals):
EXIT_DICT['changed'] = True
module.exit_json(**EXIT_DICT)
def cloudfiles(module, container_, state, meta_, clear_meta, typ, ttl, public,
private, web_index, web_error):
""" Dispatch from here to work with metadata or file objects """
cf = pyrax.cloudfiles
if cf is None:
module.fail_json(msg='Failed to instantiate client. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
if typ == "container":
container(cf, module, container_, state, meta_, clear_meta, ttl,
public, private, web_index, web_error)
else:
meta(cf, module, container_, state, meta_, clear_meta)
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
container=dict(),
state=dict(choices=['present', 'absent', 'list'],
default='present'),
meta=dict(type='dict', default=dict()),
clear_meta=dict(default=False, type='bool'),
type=dict(choices=['container', 'meta'], default='container'),
ttl=dict(type='int'),
public=dict(default=False, type='bool'),
private=dict(default=False, type='bool'),
web_index=dict(),
web_error=dict()
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together()
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
container_ = module.params.get('container')
state = module.params.get('state')
meta_ = module.params.get('meta')
clear_meta = module.params.get('clear_meta')
typ = module.params.get('type')
ttl = module.params.get('ttl')
public = module.params.get('public')
private = module.params.get('private')
web_index = module.params.get('web_index')
web_error = module.params.get('web_error')
if state in ['present', 'absent'] and not container_:
module.fail_json(msg='please specify a container name')
if clear_meta and typ != 'meta':
module.fail_json(msg='clear_meta can only be used when setting '
'metadata')
setup_rax_module(module, pyrax)
cloudfiles(module, container_, state, meta_, clear_meta, typ, ttl, public,
private, web_index, web_error)
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
main()
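
_fetch_meta() above strips the 'x-container-meta-' prefix that Cloud Files
prepends to container metadata keys before reporting them back. The same
normalization as a standalone sketch:

META_PREFIX = 'x-container-meta-'

def strip_meta_prefix(raw_meta):
    # Illustrative: 'x-container-meta-file_for' -> 'file_for'.
    return dict((k.split(META_PREFIX)[-1], v) for k, v in raw_meta.items())

assert strip_meta_prefix({'x-container-meta-file_for': 'someuser@example.com'}) \
    == {'file_for': 'someuser@example.com'}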

@ -1,603 +0,0 @@
#!/usr/bin/python
# (c) 2013, Paul Durivage <paul.durivage@rackspace.com>
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
---
module: rax_files_objects
short_description: Upload, download, and delete objects in Rackspace Cloud Files
description:
- Upload, download, and delete objects in Rackspace Cloud Files
version_added: "1.5"
options:
clear_meta:
description:
- Optionally clear existing metadata when applying metadata to existing objects.
Selecting this option is only appropriate when setting type=meta
choices:
- "yes"
- "no"
default: "no"
container:
description:
- The container to use for file object operations.
required: true
default: null
dest:
description:
- The destination of a "get" operation; e.g. a local directory, "/home/user/myfolder".
Used to specify the destination of an operation on a remote object; e.g. a file name,
"file1", or a comma-separated list of remote objects, "file1,file2,file17"
expires:
description:
- Used to set an expiration on a file or folder uploaded to Cloud Files.
Requires an integer, specifying expiration in seconds
default: null
meta:
description:
- A hash of items to set as metadata values on an uploaded file or folder
default: null
method:
description:
- The method of operation to be performed. For example, put to upload files
to Cloud Files, get to download files from Cloud Files or delete to delete
remote objects in Cloud Files
choices:
- get
- put
- delete
default: get
src:
description:
- Source from which to upload files. Used to specify a remote object as a source for
an operation, e.g. a file name, "file1", or a comma-separated list of remote objects,
"file1,file2,file17". src and dest are mutually exclusive on remote-only object operations
default: null
structure:
description:
- Used to specify whether to maintain nested directory structure when downloading objects
from Cloud Files. Setting to false downloads the contents of a container to a single,
flat directory
choices:
- "yes"
- "no"
default: "yes"
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
type:
description:
- Type of object to do work on
- Metadata object or a file object
choices:
- file
- meta
default: file
author: Paul Durivage
extends_documentation_fragment: rackspace
'''
EXAMPLES = '''
- name: "Test Cloud Files Objects"
hosts: local
gather_facts: False
tasks:
- name: "Get objects from test container"
rax_files_objects: container=testcont dest=~/Downloads/testcont
- name: "Get single object from test container"
rax_files_objects: container=testcont src=file1 dest=~/Downloads/testcont
- name: "Get several objects from test container"
rax_files_objects: container=testcont src=file1,file2,file3 dest=~/Downloads/testcont
- name: "Delete one object in test container"
rax_files_objects: container=testcont method=delete dest=file1
- name: "Delete several objects in test container"
rax_files_objects: container=testcont method=delete dest=file2,file3,file4
- name: "Delete all objects in test container"
rax_files_objects: container=testcont method=delete
- name: "Upload all files to test container"
rax_files_objects: container=testcont method=put src=~/Downloads/onehundred
- name: "Upload one file to test container"
rax_files_objects: container=testcont method=put src=~/Downloads/testcont/file1
- name: "Upload one file to test container with metadata"
rax_files_objects:
container: testcont
src: ~/Downloads/testcont/file2
method: put
meta:
testkey: testdata
who_uploaded_this: someuser@example.com
- name: "Upload one file to test container with TTL of 60 seconds"
rax_files_objects: container=testcont method=put src=~/Downloads/testcont/file3 expires=60
- name: "Attempt to get remote object that does not exist"
rax_files_objects: container=testcont method=get src=FileThatDoesNotExist.jpg dest=~/Downloads/testcont
ignore_errors: yes
- name: "Attempt to delete remote object that does not exist"
rax_files_objects: container=testcont method=delete dest=FileThatDoesNotExist.jpg
ignore_errors: yes
- name: "Test Cloud Files Objects Metadata"
hosts: local
gather_facts: false
tasks:
- name: "Get metadata on one object"
rax_files_objects: container=testcont type=meta dest=file2
- name: "Get metadata on several objects"
rax_files_objects: container=testcont type=meta src=file2,file1
- name: "Set metadata on an object"
rax_files_objects:
container: testcont
type: meta
dest: file17
method: put
meta:
key1: value1
key2: value2
clear_meta: true
- name: "Verify metadata is set"
rax_files_objects: container=testcont type=meta src=file17
- name: "Delete metadata"
rax_files_objects:
container: testcont
type: meta
dest: file17
method: delete
meta:
key1: ''
key2: ''
- name: "Get metadata on all objects"
rax_files_objects: container=testcont type=meta
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
EXIT_DICT = dict(success=False)
META_PREFIX = 'x-object-meta-'
def _get_container(module, cf, container):
try:
return cf.get_container(container)
except pyrax.exc.NoSuchContainer, e:
module.fail_json(msg=e.message)
def upload(module, cf, container, src, dest, meta, expires):
""" Uploads a single object or a folder to Cloud Files Optionally sets an
metadata, TTL value (expires), or Content-Disposition and Content-Encoding
headers.
"""
c = _get_container(module, cf, container)
num_objs_before = len(c.get_object_names())
if not src:
module.fail_json(msg='src must be specified when uploading')
src = os.path.abspath(os.path.expanduser(src))
is_dir = os.path.isdir(src)
if (not is_dir and not os.path.isfile(src)) or not os.path.exists(src):
module.fail_json(msg='src must be a file or a directory')
if dest and is_dir:
module.fail_json(msg='dest cannot be set when whole '
'directories are uploaded')
cont_obj = None
if dest and not is_dir:
try:
cont_obj = c.upload_file(src, obj_name=dest, ttl=expires)
except Exception, e:
module.fail_json(msg=e.message)
elif is_dir:
try:
upload_key, total_bytes = cf.upload_folder(src, container=c.name, ttl=expires)
except Exception, e:
module.fail_json(msg=e.message)
while True:
uploaded = cf.get_uploaded(upload_key)
if uploaded == total_bytes:
break
time.sleep(1)
else:
try:
cont_obj = c.upload_file(src, ttl=expires)
except Exception, e:
module.fail_json(msg=e.message)
num_objs_after = len(c.get_object_names())
if not meta:
meta = dict()
meta_result = dict()
if meta:
if cont_obj:
meta_result = cont_obj.set_metadata(meta)
else:
def _set_meta(objs, meta):
""" Sets metadata on a list of objects specified by name """
for obj in objs:
try:
result = c.get_object(obj).set_metadata(meta)
except Exception, e:
module.fail_json(msg=e.message)
else:
meta_result[obj] = result
return meta_result
def _walker(objs, path, filenames):
""" Callback func for os.path.walk """
prefix = ''
if path != src:
prefix = path.split(src)[-1].lstrip('/')
filenames = [os.path.join(prefix, name) for name in filenames
if not os.path.isdir(name)]
objs += filenames
_objs = []
os.path.walk(src, _walker, _objs)
meta_result = _set_meta(_objs, meta)
EXIT_DICT['success'] = True
EXIT_DICT['container'] = c.name
EXIT_DICT['msg'] = "Uploaded %s to container: %s" % (src, c.name)
if cont_obj or locals().get('uploaded'):
EXIT_DICT['changed'] = True
if meta_result:
EXIT_DICT['meta'] = dict(updated=True)
if cont_obj:
EXIT_DICT['bytes'] = cont_obj.total_bytes
EXIT_DICT['etag'] = cont_obj.etag
else:
EXIT_DICT['bytes'] = total_bytes
module.exit_json(**EXIT_DICT)
def download(module, cf, container, src, dest, structure):
""" Download objects from Cloud Files to a local path specified by "dest".
Optionally disable maintaining a directory structure by passing a
false value to "structure".
"""
# Looking for an explicit destination
if not dest:
module.fail_json(msg='dest is a required argument when '
'downloading from Cloud Files')
# Attempt to fetch the container by name
c = _get_container(module, cf, container)
# Accept a single object name or a comma-separated list of objs
# If not specified, get the entire container
if src:
objs = src.split(',')
objs = map(str.strip, objs)
else:
objs = c.get_object_names()
dest = os.path.abspath(os.path.expanduser(dest))
is_dir = os.path.isdir(dest)
if not is_dir:
module.fail_json(msg='dest must be a directory')
results = []
for obj in objs:
try:
c.download_object(obj, dest, structure=structure)
except Exception, e:
module.fail_json(msg=e.message)
else:
results.append(obj)
len_results = len(results)
len_objs = len(objs)
EXIT_DICT['container'] = c.name
EXIT_DICT['requested_downloaded'] = results
if results:
EXIT_DICT['changed'] = True
if len_results == len_objs:
EXIT_DICT['success'] = True
EXIT_DICT['msg'] = "%s objects downloaded to %s" % (len_results, dest)
else:
EXIT_DICT['msg'] = "Error: only %s of %s objects were " \
"downloaded" % (len_results, len_objs)
module.exit_json(**EXIT_DICT)
def delete(module, cf, container, src, dest):
""" Delete specific objects by proving a single file name or a
comma-separated list to src OR dest (but not both). Omitting file name(s)
assumes the entire container is to be deleted.
"""
objs = None
if src and dest:
module.fail_json(msg="Error: ambiguous instructions; files to be deleted "
"have been specified on both src and dest args")
elif dest:
objs = dest
else:
objs = src
c = _get_container(module, cf, container)
if objs:
objs = objs.split(',')
objs = map(str.strip, objs)
else:
objs = c.get_object_names()
num_objs = len(objs)
results = []
for obj in objs:
try:
result = c.delete_object(obj)
except Exception, e:
module.fail_json(msg=e.message)
else:
results.append(result)
num_deleted = results.count(True)
EXIT_DICT['container'] = c.name
EXIT_DICT['deleted'] = num_deleted
EXIT_DICT['requested_deleted'] = objs
if num_deleted:
EXIT_DICT['changed'] = True
if num_objs == num_deleted:
EXIT_DICT['success'] = True
EXIT_DICT['msg'] = "%s objects deleted" % num_deleted
else:
EXIT_DICT['msg'] = ("Error: only %s of %s objects "
"deleted" % (num_deleted, num_objs))
module.exit_json(**EXIT_DICT)
def get_meta(module, cf, container, src, dest):
""" Get metadata for a single file, comma-separated list, or entire
container
"""
c = _get_container(module, cf, container)
objs = None
if src and dest:
module.fail_json(msg="Error: ambiguous instructions; files to be deleted "
"have been specified on both src and dest args")
elif dest:
objs = dest
else:
objs = src
if objs:
objs = objs.split(',')
objs = map(str.strip, objs)
else:
objs = c.get_object_names()
results = dict()
for obj in objs:
try:
meta = c.get_object(obj).get_metadata()
except Exception, e:
module.fail_json(msg=e.message)
else:
results[obj] = dict()
for k, v in meta.items():
meta_key = k.split(META_PREFIX)[-1]
results[obj][meta_key] = v
EXIT_DICT['container'] = c.name
if results:
EXIT_DICT['meta_results'] = results
EXIT_DICT['success'] = True
module.exit_json(**EXIT_DICT)
def put_meta(module, cf, container, src, dest, meta, clear_meta):
""" Set metadata on a container, single file, or comma-separated list.
Passing a true value to clear_meta clears the metadata stored in Cloud
Files before setting the new metadata to the value of "meta".
"""
objs = None
if src and dest:
module.fail_json(msg="Error: ambiguous instructions; files to set meta"
" have been specified on both src and dest args")
elif dest:
objs = dest
else:
objs = src
objs = objs.split(',')
objs = map(str.strip, objs)
c = _get_container(module, cf, container)
results = []
for obj in objs:
try:
result = c.get_object(obj).set_metadata(meta, clear=clear_meta)
except Exception, e:
module.fail_json(msg=e.message)
else:
results.append(result)
EXIT_DICT['container'] = c.name
EXIT_DICT['success'] = True
if results:
EXIT_DICT['changed'] = True
EXIT_DICT['num_changed'] = len(results)
module.exit_json(**EXIT_DICT)
def delete_meta(module, cf, container, src, dest, meta):
""" Removes metadata keys and values specified in meta, if any. Deletes on
all objects specified by src or dest (but not both), if any; otherwise it
deletes keys on all objects in the container
"""
objs = None
if src and dest:
module.fail_json(msg="Error: ambiguous instructions; meta keys to be "
"deleted have been specified on both src and dest"
" args")
elif dest:
objs = dest
else:
objs = src
objs = objs.split(',')
objs = map(str.strip, objs)
c = _get_container(module, cf, container)
results = [] # Num of metadata keys removed, not objects affected
for obj in objs:
if meta:
for k, v in meta.items():
try:
result = c.get_object(obj).remove_metadata_key(k)
except Exception, e:
module.fail_json(msg=e.message)
else:
results.append(result)
else:
try:
o = c.get_object(obj)
except pyrax.exc.NoSuchObject, e:
module.fail_json(msg=e.message)
for k, v in o.get_metadata().items():
try:
result = o.remove_metadata_key(k)
except Exception, e:
module.fail_json(msg=e.message)
results.append(result)
EXIT_DICT['container'] = c.name
EXIT_DICT['success'] = True
if results:
EXIT_DICT['changed'] = True
EXIT_DICT['num_deleted'] = len(results)
module.exit_json(**EXIT_DICT)
def cloudfiles(module, container, src, dest, method, typ, meta, clear_meta,
structure, expires):
""" Dispatch from here to work with metadata or file objects """
cf = pyrax.cloudfiles
if cf is None:
module.fail_json(msg='Failed to instantiate client. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
if typ == "file":
if method == 'put':
upload(module, cf, container, src, dest, meta, expires)
elif method == 'get':
download(module, cf, container, src, dest, structure)
elif method == 'delete':
delete(module, cf, container, src, dest)
else:
if method == 'get':
get_meta(module, cf, container, src, dest)
if method == 'put':
put_meta(module, cf, container, src, dest, meta, clear_meta)
if method == 'delete':
delete_meta(module, cf, container, src, dest, meta)
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
container=dict(required=True),
src=dict(),
dest=dict(),
method=dict(default='get', choices=['put', 'get', 'delete']),
type=dict(default='file', choices=['file', 'meta']),
meta=dict(type='dict', default=dict()),
clear_meta=dict(default=False, type='bool'),
structure=dict(default=True, type='bool'),
expires=dict(type='int'),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together()
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
container = module.params.get('container')
src = module.params.get('src')
dest = module.params.get('dest')
method = module.params.get('method')
typ = module.params.get('type')
meta = module.params.get('meta')
clear_meta = module.params.get('clear_meta')
structure = module.params.get('structure')
expires = module.params.get('expires')
if clear_meta and typ != 'meta':
module.fail_json(msg='clear_meta can only be used when setting metadata')
setup_rax_module(module, pyrax)
cloudfiles(module, container, src, dest, method, typ, meta, clear_meta, structure, expires)
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
main()
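
download(), delete(), and the metadata helpers above share one convention:
src/dest accept a single object name or a comma-separated list, and omitting
both means "operate on every object in the container". That parsing as a
standalone sketch:

def parse_objects(spec, all_names):
    # Illustrative: spec is None/'' or 'file1,file2'; all_names is the
    # container listing used as the fallback.
    if not spec:
        return list(all_names)
    return [name.strip() for name in spec.split(',')]

assert parse_objects('file1, file2', []) == ['file1', 'file2']
assert parse_objects(None, ['a', 'b']) == ['a', 'b']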

@ -1,110 +0,0 @@
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
---
module: rax_identity
short_description: Load Rackspace Cloud Identity
description:
- Verifies Rackspace Cloud credentials and returns identity information
version_added: "1.5"
options:
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
author: Christopher H. Laco, Matt Martz
extends_documentation_fragment: rackspace.openstack
'''
EXAMPLES = '''
- name: Load Rackspace Cloud Identity
gather_facts: False
hosts: local
connection: local
tasks:
- name: Load Identity
local_action:
module: rax_identity
credentials: ~/.raxpub
region: DFW
register: rackspace_identity
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
def cloud_identity(module, state, identity):
for arg, value in dict(state=state, identity=identity).items():
if not value:
module.fail_json(msg='%s is required for rax_identity' % arg)
instance = dict(
authenticated=identity.authenticated,
credentials=identity._creds_file
)
changed = False
instance.update(rax_to_dict(identity))
instance['services'] = instance.get('services', {}).keys()
if state == 'present':
if not identity.authenticated:
module.fail_json(msg='Credentials could not be verified!')
module.exit_json(changed=changed, identity=instance)
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
state=dict(default='present', choices=['present', 'absent'])
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together()
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
state = module.params.get('state')
setup_rax_module(module, pyrax)
if pyrax.identity is None:
module.fail_json(msg='Failed to instantiate client. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
cloud_identity(module, state, pyrax.identity)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
### invoke the module
main()
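
Outside of Ansible, the check rax_identity performs reduces to a few pyrax
calls. A hedged sketch, assuming pyrax is installed and the credentials file
path (illustrative here) is valid for your account:

import pyrax

pyrax.set_credential_file('/home/user/.raxpub')  # illustrative path
if not pyrax.identity.authenticated:
    raise SystemExit('Credentials could not be verified!')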

@ -1,174 +0,0 @@
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
---
module: rax_keypair
short_description: Create a keypair for use with Rackspace Cloud Servers
description:
- Create a keypair for use with Rackspace Cloud Servers
version_added: 1.5
options:
name:
description:
- Name of keypair
required: true
public_key:
description:
- Public Key string to upload. Can be a file path or string
default: null
state:
description:
- Indicate desired state of the resource
choices:
- present
- absent
default: present
author: Matt Martz
notes:
- Keypairs cannot be manipulated, only created and deleted. To "update" a
keypair you must first delete and then recreate.
- The ability to specify a file path for the public key was added in 1.7
extends_documentation_fragment: rackspace.openstack
'''
EXAMPLES = '''
- name: Create a keypair
hosts: localhost
gather_facts: False
tasks:
- name: keypair request
local_action:
module: rax_keypair
credentials: ~/.raxpub
name: my_keypair
region: DFW
register: keypair
- name: Create local public key
local_action:
module: copy
content: "{{ keypair.keypair.public_key }}"
dest: "{{ inventory_dir }}/{{ keypair.keypair.name }}.pub"
- name: Create local private key
local_action:
module: copy
content: "{{ keypair.keypair.private_key }}"
dest: "{{ inventory_dir }}/{{ keypair.keypair.name }}"
- name: Create a keypair
hosts: localhost
gather_facts: False
tasks:
- name: keypair request
local_action:
module: rax_keypair
credentials: ~/.raxpub
name: my_keypair
public_key: "{{ lookup('file', 'authorized_keys/id_rsa.pub') }}"
region: DFW
register: keypair
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
def rax_keypair(module, name, public_key, state):
changed = False
cs = pyrax.cloudservers
if cs is None:
module.fail_json(msg='Failed to instantiate client. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
keypair = {}
if state == 'present':
if public_key and os.path.isfile(public_key):
try:
f = open(public_key)
public_key = f.read()
f.close()
except Exception, e:
module.fail_json(msg='Failed to load %s' % public_key)
try:
keypair = cs.keypairs.find(name=name)
except cs.exceptions.NotFound:
try:
keypair = cs.keypairs.create(name, public_key)
changed = True
except Exception, e:
module.fail_json(msg='%s' % e.message)
except Exception, e:
module.fail_json(msg='%s' % e.message)
elif state == 'absent':
try:
keypair = cs.keypairs.find(name=name)
except Exception:
pass
if keypair:
try:
keypair.delete()
changed = True
except Exception, e:
module.fail_json(msg='%s' % e.message)
module.exit_json(changed=changed, keypair=rax_to_dict(keypair))
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
name=dict(),
public_key=dict(),
state=dict(default='present', choices=['absent', 'present']),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together(),
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
name = module.params.get('name')
public_key = module.params.get('public_key')
state = module.params.get('state')
setup_rax_module(module, pyrax)
rax_keypair(module, name, public_key, state)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
### invoke the module
main()
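
The 1.7 convenience noted in the docs above (public_key may be key material or
a file path) comes down to an isfile() probe before use. Isolated as a sketch:

import os

def resolve_public_key(public_key):
    # Illustrative: a path on disk is read; anything else is assumed to be
    # the key material itself.
    if public_key and os.path.isfile(public_key):
        with open(public_key) as f:
            return f.read()
    return public_key

assert resolve_public_key('ssh-rsa AAAA... user@host') == 'ssh-rsa AAAA... user@host'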

@ -1,178 +0,0 @@
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
---
module: rax_meta
short_description: Manipulate metadata for Rackspace Cloud Servers
description:
- Manipulate metadata for Rackspace Cloud Servers
version_added: 1.7
options:
address:
description:
- Server IP address to modify metadata for; will match any IP assigned to
the server
id:
description:
- Server ID to modify metadata for
name:
description:
- Server name to modify metadata for
default: null
meta:
description:
- A hash of metadata to associate with the instance
default: null
author: Matt Martz
extends_documentation_fragment: rackspace.openstack
'''
EXAMPLES = '''
- name: Set metadata for a server
hosts: all
gather_facts: False
tasks:
- name: Set metadata
local_action:
module: rax_meta
credentials: ~/.raxpub
name: "{{ inventory_hostname }}"
region: DFW
meta:
group: primary_group
groups:
- group_two
- group_three
app: my_app
- name: Clear metadata
local_action:
module: rax_meta
credentials: ~/.raxpub
name: "{{ inventory_hostname }}"
region: DFW
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
def rax_meta(module, address, name, server_id, meta):
changed = False
cs = pyrax.cloudservers
if cs is None:
module.fail_json(msg='Failed to instantiate client. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
search_opts = {}
if name:
search_opts = dict(name='^%s$' % name)
try:
servers = cs.servers.list(search_opts=search_opts)
except Exception, e:
module.fail_json(msg='%s' % e.message)
elif address:
servers = []
try:
for server in cs.servers.list():
for addresses in server.networks.values():
if address in addresses:
servers.append(server)
break
except Exception, e:
module.fail_json(msg='%s' % e.message)
elif server_id:
servers = []
try:
servers.append(cs.servers.get(server_id))
except Exception:
pass
if len(servers) > 1:
module.fail_json(msg='Multiple servers found matching provided '
'search parameters')
elif not servers:
module.fail_json(msg='Failed to find a server matching provided '
'search parameters')
# Normalize and ensure all metadata values are strings
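# e.g. {'groups': ['a', 'b'], 'count': 2} becomes {'groups': 'a,b', 'count': '2'}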
for k, v in meta.items():
if isinstance(v, list):
meta[k] = ','.join(['%s' % i for i in v])
elif isinstance(v, dict):
meta[k] = json.dumps(v)
elif not isinstance(v, basestring):
meta[k] = '%s' % v
server = servers[0]
if server.metadata == meta:
changed = False
else:
changed = True
removed = set(server.metadata.keys()).difference(meta.keys())
cs.servers.delete_meta(server, list(removed))
cs.servers.set_meta(server, meta)
server.get()
module.exit_json(changed=changed, meta=server.metadata)
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
address=dict(),
id=dict(),
name=dict(),
meta=dict(type='dict', default=dict()),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together(),
mutually_exclusive=[['address', 'id', 'name']],
required_one_of=[['address', 'id', 'name']],
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
address = module.params.get('address')
server_id = module.params.get('id')
name = module.params.get('name')
meta = module.params.get('meta')
setup_rax_module(module, pyrax)
rax_meta(module, address, name, server_id, meta)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
### invoke the module
main()

@ -1,145 +0,0 @@
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
---
module: rax_network
short_description: create / delete an isolated network in Rackspace Public Cloud
description:
- creates / deletes a Rackspace Public Cloud isolated network.
version_added: "1.4"
options:
state:
description:
- Indicate desired state of the resource
choices:
- present
- absent
default: present
label:
description:
- Label (name) to give the network
default: null
cidr:
description:
- cidr of the network being created
default: null
author: Christopher H. Laco, Jesse Keating
extends_documentation_fragment: rackspace.openstack
'''
EXAMPLES = '''
- name: Build an Isolated Network
gather_facts: False
tasks:
- name: Network create request
local_action:
module: rax_network
credentials: ~/.raxpub
label: my-net
cidr: 192.168.3.0/24
state: present
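    # The same network can later be removed by flipping state to absent
    # (the label here is illustrative and must match the one created above)
    - name: Network delete request
      local_action:
        module: rax_network
        credentials: ~/.raxpub
        label: my-net
        state: absent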
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
def cloud_network(module, state, label, cidr):
    for arg, value in (('state', state), ('label', label), ('cidr', cidr)):
        if not value:
            module.fail_json(msg='%s is required for cloud_networks' % arg)
changed = False
network = None
networks = []
if not pyrax.cloud_networks:
module.fail_json(msg='Failed to instantiate client. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
if state == 'present':
try:
network = pyrax.cloud_networks.find_network_by_label(label)
except pyrax.exceptions.NetworkNotFound:
try:
network = pyrax.cloud_networks.create(label, cidr=cidr)
changed = True
except Exception, e:
module.fail_json(msg='%s' % e.message)
except Exception, e:
module.fail_json(msg='%s' % e.message)
elif state == 'absent':
try:
network = pyrax.cloud_networks.find_network_by_label(label)
network.delete()
changed = True
except pyrax.exceptions.NetworkNotFound:
pass
except Exception, e:
module.fail_json(msg='%s' % e.message)
if network:
instance = dict(id=network.id,
label=network.label,
cidr=network.cidr)
networks.append(instance)
module.exit_json(changed=changed, networks=networks)
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
state=dict(default='present',
choices=['present', 'absent']),
label=dict(),
cidr=dict()
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together(),
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
state = module.params.get('state')
label = module.params.get('label')
cidr = module.params.get('cidr')
setup_rax_module(module, pyrax)
cloud_network(module, state, label, cidr)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
### invoke the module
main()

@ -1,145 +0,0 @@
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
---
module: rax_queue
short_description: create / delete a queue in Rackspace Public Cloud
description:
- creates / deletes a Rackspace Public Cloud queue.
version_added: "1.5"
options:
name:
description:
- Name to give the queue
default: null
state:
description:
- Indicate desired state of the resource
choices:
- present
- absent
default: present
author: Christopher H. Laco, Matt Martz
extends_documentation_fragment: rackspace
'''
EXAMPLES = '''
- name: Build a Queue
gather_facts: False
hosts: local
connection: local
tasks:
- name: Queue create request
local_action:
module: rax_queue
credentials: ~/.raxpub
name: my-queue
region: DFW
state: present
register: my_queue
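    # Deleting the queue is the same request with state=absent
    # (name and region are illustrative)
    - name: Queue delete request
      local_action:
        module: rax_queue
        credentials: ~/.raxpub
        name: my-queue
        region: DFW
        state: absent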
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
def cloud_queue(module, state, name):
    for arg, value in (('state', state), ('name', name)):
        if not value:
            module.fail_json(msg='%s is required for rax_queue' % arg)
changed = False
queues = []
instance = {}
cq = pyrax.queues
if not cq:
module.fail_json(msg='Failed to instantiate client. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
for queue in cq.list():
if name != queue.name:
continue
queues.append(queue)
if len(queues) > 1:
module.fail_json(msg='Multiple Queues were matched by name')
if state == 'present':
if not queues:
try:
queue = cq.create(name)
changed = True
except Exception, e:
module.fail_json(msg='%s' % e.message)
else:
queue = queues[0]
instance = dict(name=queue.name)
result = dict(changed=changed, queue=instance)
module.exit_json(**result)
elif state == 'absent':
if queues:
queue = queues[0]
try:
queue.delete()
changed = True
except Exception, e:
module.fail_json(msg='%s' % e.message)
module.exit_json(changed=changed, queue=instance)
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
name=dict(),
state=dict(default='present', choices=['present', 'absent']),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together()
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
name = module.params.get('name')
state = module.params.get('state')
setup_rax_module(module, pyrax)
cloud_queue(module, state, name)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
### invoke the module
main()

@ -1,351 +0,0 @@
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
---
module: rax_scaling_group
short_description: Manipulate Rackspace Cloud Autoscale Groups
description:
- Manipulate Rackspace Cloud Autoscale Groups
version_added: 1.7
options:
cooldown:
description:
- The period of time, in seconds, that must pass before any scaling can
occur after the previous scaling. Must be an integer between 0 and
86400 (24 hrs).
disk_config:
description:
- Disk partitioning strategy
choices:
- auto
- manual
default: auto
files:
description:
- 'Files to insert into the instance. Hash of C(remotepath: localpath)'
default: null
flavor:
description:
- flavor to use for the instance
required: true
image:
description:
- image to use for the instance. Can be an C(id), C(human_id) or C(name)
required: true
key_name:
description:
- key pair to use on the instance
default: null
loadbalancers:
description:
- List of load balancer C(id) and C(port) hashes
max_entities:
description:
- The maximum number of entities that are allowed in the scaling group.
Must be an integer between 0 and 1000.
required: true
meta:
description:
- A hash of metadata to associate with the instance
default: null
min_entities:
description:
- The minimum number of entities that are allowed in the scaling group.
Must be an integer between 0 and 1000.
required: true
name:
description:
- Name to give the scaling group
required: true
networks:
description:
- The network to attach to the instances. If specified, you must include
ALL networks including the public and private interfaces. Can be C(id)
or C(label).
default:
- public
- private
server_name:
description:
- The base name for servers created by Autoscale
required: true
state:
description:
- Indicate desired state of the resource
choices:
- present
- absent
default: present
author: Matt Martz
extends_documentation_fragment: rackspace
'''
EXAMPLES = '''
---
- hosts: localhost
gather_facts: false
connection: local
tasks:
- rax_scaling_group:
credentials: ~/.raxpub
region: ORD
cooldown: 300
flavor: performance1-1
image: bb02b1a3-bc77-4d17-ab5b-421d89850fca
min_entities: 5
max_entities: 10
name: ASG Test
server_name: asgtest
loadbalancers:
- id: 228385
port: 80
register: asg
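    # A fuller request exercising the remaining documented launch options;
    # every value below is illustrative
    - rax_scaling_group:
        credentials: ~/.raxpub
        region: ORD
        cooldown: 300
        flavor: performance1-1
        image: bb02b1a3-bc77-4d17-ab5b-421d89850fca
        min_entities: 5
        max_entities: 10
        name: ASG Test Full
        server_name: asgtest
        key_name: my_rackspace_key
        disk_config: manual
        meta:
          app: my_app
        networks:
          - public
          - private
      register: asg_full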
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
def rax_asg(module, cooldown=300, disk_config=None, files={}, flavor=None,
image=None, key_name=None, loadbalancers=[], meta={},
min_entities=0, max_entities=0, name=None, networks=[],
server_name=None, state='present'):
changed = False
au = pyrax.autoscale
cnw = pyrax.cloud_networks
cs = pyrax.cloudservers
if not au or not cnw or not cs:
module.fail_json(msg='Failed to instantiate clients. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
if state == 'present':
# Normalize and ensure all metadata values are strings
if meta:
for k, v in meta.items():
if isinstance(v, list):
meta[k] = ','.join(['%s' % i for i in v])
elif isinstance(v, dict):
meta[k] = json.dumps(v)
elif not isinstance(v, basestring):
meta[k] = '%s' % v
if image:
image = rax_find_image(module, pyrax, image)
nics = []
if networks:
for network in networks:
nics.extend(rax_find_network(module, pyrax, network))
for nic in nics:
# pyrax is currently returning net-id, but we need uuid
# this check makes this forward compatible for a time when
# pyrax uses uuid instead
if nic.get('net-id'):
nic.update(uuid=nic['net-id'])
del nic['net-id']
# Handle the file contents
personality = []
if files:
for rpath in files.keys():
lpath = os.path.expanduser(files[rpath])
try:
f = open(lpath, 'r')
personality.append({
'path': rpath,
'contents': f.read()
})
f.close()
except Exception, e:
module.fail_json(msg='Failed to load %s' % lpath)
lbs = []
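        # Convert the documented id/port hashes into (id, port) tuples,
        # e.g. [{'id': 228385, 'port': 80}] -> [(228385, 80)]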
if loadbalancers:
for lb in loadbalancers:
lb_id = lb.get('id')
port = lb.get('port')
if not lb_id or not port:
continue
lbs.append((lb_id, port))
try:
sg = au.find(name=name)
except pyrax.exceptions.NoUniqueMatch, e:
module.fail_json(msg='%s' % e.message)
except pyrax.exceptions.NotFound:
try:
sg = au.create(name, cooldown=cooldown,
min_entities=min_entities,
max_entities=max_entities,
launch_config_type='launch_server',
server_name=server_name, image=image,
flavor=flavor, disk_config=disk_config,
metadata=meta, personality=files,
networks=nics, load_balancers=lbs,
key_name=key_name)
changed = True
except Exception, e:
module.fail_json(msg='%s' % e.message)
if not changed:
# Scaling Group Updates
group_args = {}
if cooldown != sg.cooldown:
group_args['cooldown'] = cooldown
if min_entities != sg.min_entities:
group_args['min_entities'] = min_entities
if max_entities != sg.max_entities:
group_args['max_entities'] = max_entities
if group_args:
changed = True
sg.update(**group_args)
# Launch Configuration Updates
lc = sg.get_launch_config()
lc_args = {}
if server_name != lc.get('name'):
lc_args['name'] = server_name
if image != lc.get('image'):
lc_args['image'] = image
if flavor != lc.get('flavor'):
lc_args['flavor'] = flavor
if disk_config != lc.get('disk_config'):
lc_args['disk_config'] = disk_config
if meta != lc.get('metadata'):
lc_args['metadata'] = meta
if files != lc.get('personality'):
lc_args['personality'] = files
if nics != lc.get('networks'):
lc_args['networks'] = nics
if lbs != lc.get('load_balancers'):
# Work around for https://github.com/rackspace/pyrax/pull/393
lc_args['load_balancers'] = sg.manager._resolve_lbs(lbs)
if key_name != lc.get('key_name'):
lc_args['key_name'] = key_name
if lc_args:
# Work around for https://github.com/rackspace/pyrax/pull/389
if 'flavor' not in lc_args:
lc_args['flavor'] = lc.get('flavor')
changed = True
sg.update_launch_config(**lc_args)
sg.get()
module.exit_json(changed=changed, autoscale_group=rax_to_dict(sg))
else:
try:
sg = au.find(name=name)
sg.delete()
changed = True
except pyrax.exceptions.NotFound, e:
sg = {}
except Exception, e:
module.fail_json(msg='%s' % e.message)
module.exit_json(changed=changed, autoscale_group=rax_to_dict(sg))
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
cooldown=dict(type='int', default=300),
disk_config=dict(choices=['auto', 'manual']),
files=dict(type='list', default=[]),
flavor=dict(required=True),
image=dict(required=True),
key_name=dict(),
loadbalancers=dict(type='list'),
meta=dict(type='dict', default={}),
min_entities=dict(type='int', required=True),
max_entities=dict(type='int', required=True),
name=dict(required=True),
networks=dict(type='list', default=['public', 'private']),
server_name=dict(required=True),
state=dict(default='present', choices=['present', 'absent']),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together(),
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
cooldown = module.params.get('cooldown')
disk_config = module.params.get('disk_config')
if disk_config:
disk_config = disk_config.upper()
files = module.params.get('files')
flavor = module.params.get('flavor')
image = module.params.get('image')
key_name = module.params.get('key_name')
loadbalancers = module.params.get('loadbalancers')
meta = module.params.get('meta')
min_entities = module.params.get('min_entities')
max_entities = module.params.get('max_entities')
name = module.params.get('name')
networks = module.params.get('networks')
server_name = module.params.get('server_name')
state = module.params.get('state')
if not 0 <= min_entities <= 1000 or not 0 <= max_entities <= 1000:
        module.fail_json(msg='min_entities and max_entities must be '
                             'integers between 0 and 1000')
if not 0 <= cooldown <= 86400:
module.fail_json(msg='cooldown must be an integer between 0 and 86400')
setup_rax_module(module, pyrax)
rax_asg(module, cooldown=cooldown, disk_config=disk_config,
files=files, flavor=flavor, image=image, meta=meta,
key_name=key_name, loadbalancers=loadbalancers,
min_entities=min_entities, max_entities=max_entities,
name=name, networks=networks, server_name=server_name,
state=state)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
# invoke the module
main()

@ -1,283 +0,0 @@
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
---
module: rax_scaling_policy
short_description: Manipulate Rackspace Cloud Autoscale Scaling Policy
description:
- Manipulate Rackspace Cloud Autoscale Scaling Policy
version_added: 1.7
options:
at:
description:
- The UTC time when this policy will be executed. The time must be
formatted according to C(yyyy-MM-dd'T'HH:mm:ss.SSS) such as
C(2013-05-19T08:07:08Z)
change:
description:
- The change, either as a number of servers or as a percentage, to make
in the scaling group. If this is a percentage, you must set
I(is_percent) to C(true) also.
cron:
description:
      - The time when the policy will be executed, as a cron entry. For
        example, C(1 0 * * *) executes the policy daily at 00:01.
cooldown:
description:
- The period of time, in seconds, that must pass before any scaling can
occur after the previous scaling. Must be an integer between 0 and
86400 (24 hrs).
desired_capacity:
description:
      - The desired server capacity of the scaling group; that is, how
        many servers should be in the scaling group.
is_percent:
description:
- Whether the value in I(change) is a percent value
default: false
name:
description:
- Name to give the policy
required: true
policy_type:
description:
      - The type of policy that will be executed.
choices:
- webhook
- schedule
required: true
scaling_group:
description:
- Name of the scaling group that this policy will be added to
required: true
state:
description:
- Indicate desired state of the resource
choices:
- present
- absent
default: present
author: Matt Martz
extends_documentation_fragment: rackspace
'''
EXAMPLES = '''
---
- hosts: localhost
gather_facts: false
connection: local
tasks:
- rax_scaling_policy:
credentials: ~/.raxpub
region: ORD
at: '2013-05-19T08:07:08Z'
change: 25
cooldown: 300
is_percent: true
name: ASG Test Policy - at
policy_type: schedule
scaling_group: ASG Test
register: asps_at
- rax_scaling_policy:
credentials: ~/.raxpub
region: ORD
cron: '1 0 * * *'
change: 25
cooldown: 300
is_percent: true
name: ASG Test Policy - cron
policy_type: schedule
scaling_group: ASG Test
register: asp_cron
- rax_scaling_policy:
credentials: ~/.raxpub
region: ORD
cooldown: 300
desired_capacity: 5
name: ASG Test Policy - webhook
policy_type: webhook
scaling_group: ASG Test
register: asp_webhook
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
def rax_asp(module, at=None, change=0, cron=None, cooldown=300,
desired_capacity=0, is_percent=False, name=None,
policy_type=None, scaling_group=None, state='present'):
changed = False
au = pyrax.autoscale
if not au:
module.fail_json(msg='Failed to instantiate client. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
try:
UUID(scaling_group)
except ValueError:
try:
sg = au.find(name=scaling_group)
except Exception, e:
module.fail_json(msg='%s' % e.message)
else:
try:
sg = au.get(scaling_group)
except Exception, e:
module.fail_json(msg='%s' % e.message)
if state == 'present':
policies = filter(lambda p: name == p.name, sg.list_policies())
if len(policies) > 1:
module.fail_json(msg='No unique policy match found by name')
if at:
args = dict(at=at)
elif cron:
args = dict(cron=cron)
else:
args = None
if not policies:
try:
policy = sg.add_policy(name, policy_type=policy_type,
cooldown=cooldown, change=change,
is_percent=is_percent,
desired_capacity=desired_capacity,
args=args)
changed = True
except Exception, e:
module.fail_json(msg='%s' % e.message)
else:
policy = policies[0]
kwargs = {}
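            # An existing policy exposes either `change` or `changePercent`
            # depending on whether it was created with is_percent, so both
            # attribute names are probed when building the update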
if policy_type != policy.type:
kwargs['policy_type'] = policy_type
if cooldown != policy.cooldown:
kwargs['cooldown'] = cooldown
if hasattr(policy, 'change') and change != policy.change:
kwargs['change'] = change
if hasattr(policy, 'changePercent') and is_percent is False:
kwargs['change'] = change
kwargs['is_percent'] = False
elif hasattr(policy, 'change') and is_percent is True:
kwargs['change'] = change
kwargs['is_percent'] = True
if hasattr(policy, 'desiredCapacity') and change:
kwargs['change'] = change
elif ((hasattr(policy, 'change') or
hasattr(policy, 'changePercent')) and desired_capacity):
kwargs['desired_capacity'] = desired_capacity
if hasattr(policy, 'args') and args != policy.args:
kwargs['args'] = args
if kwargs:
policy.update(**kwargs)
changed = True
policy.get()
module.exit_json(changed=changed, autoscale_policy=rax_to_dict(policy))
else:
try:
policies = filter(lambda p: name == p.name, sg.list_policies())
if len(policies) > 1:
module.fail_json(msg='No unique policy match found by name')
elif not policies:
policy = {}
            else:
                policy = policies[0]
                policy.delete()
                changed = True
except Exception, e:
module.fail_json(msg='%s' % e.message)
module.exit_json(changed=changed, autoscale_policy=rax_to_dict(policy))
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
at=dict(),
change=dict(type='int'),
cron=dict(),
cooldown=dict(type='int', default=300),
desired_capacity=dict(type='int'),
is_percent=dict(type='bool', default=False),
name=dict(required=True),
policy_type=dict(required=True, choices=['webhook', 'schedule']),
scaling_group=dict(required=True),
state=dict(default='present', choices=['present', 'absent']),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together(),
mutually_exclusive=[
['cron', 'at'],
['change', 'desired_capacity'],
]
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
at = module.params.get('at')
change = module.params.get('change')
cron = module.params.get('cron')
cooldown = module.params.get('cooldown')
desired_capacity = module.params.get('desired_capacity')
is_percent = module.params.get('is_percent')
name = module.params.get('name')
policy_type = module.params.get('policy_type')
scaling_group = module.params.get('scaling_group')
state = module.params.get('state')
if (at or cron) and policy_type == 'webhook':
module.fail_json(msg='policy_type=schedule is required for a time '
'based policy')
setup_rax_module(module, pyrax)
rax_asp(module, at=at, change=change, cron=cron, cooldown=cooldown,
desired_capacity=desired_capacity, is_percent=is_percent,
name=name, policy_type=policy_type, scaling_group=scaling_group,
state=state)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
# invoke the module
main()

@ -1,650 +0,0 @@
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: rds
version_added: "1.3"
short_description: create, delete, or modify an Amazon rds instance
description:
- Creates, deletes, or modifies rds instances. When creating an instance it can be either a new instance or a read-only replica of an existing instance. This module has a dependency on python-boto >= 2.5. The 'promote' command requires boto >= 2.18.0.
options:
command:
description:
- Specifies the action to take.
required: true
default: null
aliases: []
    choices: [ 'create', 'replicate', 'delete', 'facts', 'modify', 'promote', 'snapshot', 'restore' ]
instance_name:
description:
- Database instance identifier.
required: true
default: null
aliases: []
source_instance:
description:
- Name of the database to replicate. Used only when command=replicate.
required: false
default: null
aliases: []
db_engine:
description:
- The type of database. Used only when command=create.
required: false
default: null
aliases: []
choices: [ 'MySQL', 'oracle-se1', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres']
size:
description:
- Size in gigabytes of the initial storage for the DB instance. Used only when command=create or command=modify.
required: false
default: null
aliases: []
instance_type:
description:
- The instance type of the database. Must be specified when command=create. Optional when command=replicate, command=modify or command=restore. If not specified then the replica inherits the same instance type as the source instance.
required: false
default: null
aliases: []
username:
description:
- Master database username. Used only when command=create.
required: false
default: null
aliases: []
password:
description:
- Password for the master database username. Used only when command=create or command=modify.
required: false
default: null
aliases: []
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: true
default: null
aliases: [ 'aws_region', 'ec2_region' ]
db_name:
description:
- Name of a database to create within the instance. If not specified then no database is created. Used only when command=create.
required: false
default: null
aliases: []
engine_version:
description:
- Version number of the database engine to use. Used only when command=create. If not specified then the current Amazon RDS default engine version is used.
required: false
default: null
aliases: []
parameter_group:
description:
- Name of the DB parameter group to associate with this instance. If omitted then the RDS default DBParameterGroup will be used. Used only when command=create or command=modify.
required: false
default: null
aliases: []
license_model:
description:
- The license model for this DB instance. Used only when command=create or command=restore.
required: false
default: null
aliases: []
choices: [ 'license-included', 'bring-your-own-license', 'general-public-license' ]
multi_zone:
description:
- Specifies if this is a Multi-availability-zone deployment. Can not be used in conjunction with zone parameter. Used only when command=create or command=modify.
choices: [ "yes", "no" ]
required: false
default: null
aliases: []
iops:
description:
- Specifies the number of IOPS for the instance. Used only when command=create or command=modify. Must be an integer greater than 1000.
required: false
default: null
aliases: []
security_groups:
description:
- Comma separated list of one or more security groups. Used only when command=create or command=modify.
required: false
default: null
aliases: []
vpc_security_groups:
description:
- Comma separated list of one or more vpc security group ids. Also requires `subnet` to be specified. Used only when command=create or command=modify.
required: false
default: null
aliases: []
port:
description:
      - Port number that the DB instance uses for connections. Defaults to 3306 for MySQL. Must be changed to 1521 for Oracle, 1433 for SQL Server, 5432 for PostgreSQL. Used only when command=create or command=replicate.
required: false
default: null
aliases: []
upgrade:
description:
- Indicates that minor version upgrades should be applied automatically. Used only when command=create or command=replicate.
required: false
default: no
choices: [ "yes", "no" ]
aliases: []
option_group:
description:
- The name of the option group to use. If not specified then the default option group is used. Used only when command=create.
required: false
default: null
aliases: []
maint_window:
description:
- "Maintenance window in format of ddd:hh24:mi-ddd:hh24:mi. (Example: Mon:22:00-Mon:23:15) If not specified then a random maintenance window is assigned. Used only when command=create or command=modify."
required: false
default: null
aliases: []
backup_window:
description:
- Backup window in format of hh24:mi-hh24:mi. If not specified then a random backup window is assigned. Used only when command=create or command=modify.
required: false
default: null
aliases: []
backup_retention:
description:
- "Number of days backups are retained. Set to 0 to disable backups. Default is 1 day. Valid range: 0-35. Used only when command=create or command=modify."
required: false
default: null
aliases: []
zone:
description:
- availability zone in which to launch the instance. Used only when command=create, command=replicate or command=restore.
required: false
default: null
aliases: ['aws_zone', 'ec2_zone']
subnet:
description:
- VPC subnet group. If specified then a VPC instance is created. Used only when command=create.
required: false
default: null
aliases: []
snapshot:
description:
- Name of snapshot to take. When command=delete, if no snapshot name is provided then no snapshot is taken. Used only when command=delete or command=snapshot.
required: false
default: null
aliases: []
aws_secret_key:
description:
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_secret_key', 'secret_key' ]
aws_access_key:
description:
- AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_access_key', 'access_key' ]
wait:
description:
- When command=create, replicate, modify or restore then wait for the database to enter the 'available' state. When command=delete wait for the database to be terminated.
required: false
default: "no"
choices: [ "yes", "no" ]
aliases: []
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 300
aliases: []
apply_immediately:
description:
- Used only when command=modify. If enabled, the modifications will be applied as soon as possible rather than waiting for the next preferred maintenance window.
default: no
choices: [ "yes", "no" ]
aliases: []
new_instance_name:
description:
- Name to rename an instance to. Used only when command=modify.
required: false
default: null
aliases: []
version_added: 1.5
requirements: [ "boto" ]
author: Bruce Pennypacker
'''
EXAMPLES = '''
# Basic mysql provisioning example
- rds: >
command=create
instance_name=new_database
db_engine=MySQL
size=10
instance_type=db.m1.small
username=mysql_admin
password=1nsecure
# Create a read-only replica and wait for it to become available
- rds: >
command=replicate
instance_name=new_database_replica
source_instance=new_database
wait=yes
wait_timeout=600
# Delete an instance, but create a snapshot before doing so
- rds: >
command=delete
instance_name=new_database
snapshot=new_database_snapshot
# Get facts about an instance
- rds: >
command=facts
instance_name=new_database
register: new_database_facts
# Rename an instance and wait for the change to take effect
- rds: >
command=modify
instance_name=new_database
new_instance_name=renamed_database
wait=yes
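# Take a snapshot of an instance (snapshot name is illustrative)
- rds: >
    command=snapshot
    instance_name=new_database
    snapshot=new_database_snapshot
# Promote a read-only replica to a standalone instance
- rds: >
    command=promote
    instance_name=new_database_replica
    wait=yes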
'''
import sys
import time
try:
import boto.rds
except ImportError:
print "failed=True msg='boto required for this module'"
sys.exit(1)
def get_current_resource(conn, resource, command):
# There will be exceptions but we want the calling code to handle them
if command == 'snapshot':
return conn.get_all_dbsnapshots(snapshot_id=resource)[0]
else:
return conn.get_all_dbinstances(resource)[0]
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
command = dict(choices=['create', 'replicate', 'delete', 'facts', 'modify', 'promote', 'snapshot', 'restore'], required=True),
instance_name = dict(required=True),
source_instance = dict(required=False),
db_engine = dict(choices=['MySQL', 'oracle-se1', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres'], required=False),
size = dict(required=False),
instance_type = dict(aliases=['type'], required=False),
username = dict(required=False),
password = dict(no_log=True, required=False),
db_name = dict(required=False),
engine_version = dict(required=False),
parameter_group = dict(required=False),
license_model = dict(choices=['license-included', 'bring-your-own-license', 'general-public-license'], required=False),
multi_zone = dict(type='bool', default=False),
iops = dict(required=False),
security_groups = dict(required=False),
vpc_security_groups = dict(type='list', required=False),
port = dict(required=False),
upgrade = dict(type='bool', default=False),
option_group = dict(required=False),
maint_window = dict(required=False),
backup_window = dict(required=False),
backup_retention = dict(required=False),
zone = dict(aliases=['aws_zone', 'ec2_zone'], required=False),
subnet = dict(required=False),
wait = dict(type='bool', default=False),
wait_timeout = dict(default=300),
snapshot = dict(required=False),
apply_immediately = dict(type='bool', default=False),
new_instance_name = dict(required=False),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
)
command = module.params.get('command')
instance_name = module.params.get('instance_name')
source_instance = module.params.get('source_instance')
db_engine = module.params.get('db_engine')
size = module.params.get('size')
instance_type = module.params.get('instance_type')
username = module.params.get('username')
password = module.params.get('password')
db_name = module.params.get('db_name')
engine_version = module.params.get('engine_version')
parameter_group = module.params.get('parameter_group')
license_model = module.params.get('license_model')
multi_zone = module.params.get('multi_zone')
iops = module.params.get('iops')
security_groups = module.params.get('security_groups')
vpc_security_groups = module.params.get('vpc_security_groups')
port = module.params.get('port')
upgrade = module.params.get('upgrade')
option_group = module.params.get('option_group')
maint_window = module.params.get('maint_window')
subnet = module.params.get('subnet')
backup_window = module.params.get('backup_window')
backup_retention = module.params.get('backup_retention')
region = module.params.get('region')
zone = module.params.get('zone')
aws_secret_key = module.params.get('aws_secret_key')
aws_access_key = module.params.get('aws_access_key')
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
snapshot = module.params.get('snapshot')
apply_immediately = module.params.get('apply_immediately')
new_instance_name = module.params.get('new_instance_name')
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if not region:
module.fail_json(msg = str("region not specified and unable to determine region from EC2_REGION."))
# connect to the rds endpoint
try:
conn = connect_to_aws(boto.rds, region, **aws_connect_params)
except boto.exception.BotoServerError, e:
module.fail_json(msg = e.error_message)
def invalid_security_group_type(subnet):
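        # A subnet implies a VPC instance, so the plain 'security_groups'
        # option is the invalid one there; without a subnet it is the reverse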
if subnet:
return 'security_groups'
else:
return 'vpc_security_groups'
# Package up the optional parameters
params = {}
# Validate parameters for each command
if command == 'create':
required_vars = [ 'instance_name', 'db_engine', 'size', 'instance_type', 'username', 'password' ]
invalid_vars = [ 'source_instance', 'snapshot', 'apply_immediately', 'new_instance_name' ] + [invalid_security_group_type(subnet)]
elif command == 'replicate':
required_vars = [ 'instance_name', 'source_instance' ]
invalid_vars = [ 'db_engine', 'size', 'username', 'password', 'db_name', 'engine_version', 'parameter_group', 'license_model', 'multi_zone', 'iops', 'vpc_security_groups', 'security_groups', 'option_group', 'maint_window', 'backup_window', 'backup_retention', 'subnet', 'snapshot', 'apply_immediately', 'new_instance_name' ]
elif command == 'delete':
required_vars = [ 'instance_name' ]
        invalid_vars = [ 'db_engine', 'size', 'instance_type', 'username', 'password', 'db_name', 'engine_version', 'parameter_group', 'license_model', 'multi_zone', 'iops', 'vpc_security_groups', 'security_groups', 'option_group', 'maint_window', 'backup_window', 'backup_retention', 'port', 'upgrade', 'subnet', 'zone', 'source_instance', 'apply_immediately', 'new_instance_name' ]
elif command == 'facts':
required_vars = [ 'instance_name' ]
        invalid_vars = [ 'db_engine', 'size', 'instance_type', 'username', 'password', 'db_name', 'engine_version', 'parameter_group', 'license_model', 'multi_zone', 'iops', 'vpc_security_groups', 'security_groups', 'option_group', 'maint_window', 'backup_window', 'backup_retention', 'port', 'upgrade', 'subnet', 'zone', 'wait', 'source_instance', 'apply_immediately', 'new_instance_name' ]
elif command == 'modify':
required_vars = [ 'instance_name' ]
if password:
params["master_password"] = password
invalid_vars = [ 'db_engine', 'username', 'db_name', 'engine_version', 'license_model', 'option_group', 'port', 'upgrade', 'subnet', 'zone', 'source_instance']
elif command == 'promote':
required_vars = [ 'instance_name' ]
invalid_vars = [ 'db_engine', 'size', 'username', 'password', 'db_name', 'engine_version', 'parameter_group', 'license_model', 'multi_zone', 'iops', 'vpc_security_groups', 'security_groups', 'option_group', 'maint_window', 'subnet', 'source_instance', 'snapshot', 'apply_immediately', 'new_instance_name' ]
elif command == 'snapshot':
required_vars = [ 'instance_name', 'snapshot']
invalid_vars = [ 'db_engine', 'size', 'username', 'password', 'db_name', 'engine_version', 'parameter_group', 'license_model', 'multi_zone', 'iops', 'vpc_security_groups', 'security_groups', 'option_group', 'maint_window', 'subnet', 'source_instance', 'apply_immediately', 'new_instance_name' ]
elif command == 'restore':
required_vars = [ 'instance_name', 'snapshot', 'instance_type' ]
invalid_vars = [ 'db_engine', 'db_name', 'username', 'password', 'engine_version', 'option_group', 'source_instance', 'apply_immediately', 'new_instance_name', 'vpc_security_groups', 'security_groups' ]
for v in required_vars:
if not module.params.get(v):
module.fail_json(msg = str("Parameter %s required for %s command" % (v, command)))
for v in invalid_vars:
if module.params.get(v):
module.fail_json(msg = str("Parameter %s invalid for %s command" % (v, command)))
if db_engine:
params["engine"] = db_engine
if port:
params["port"] = port
if db_name:
params["db_name"] = db_name
if parameter_group:
params["param_group"] = parameter_group
if zone:
params["availability_zone"] = zone
if maint_window:
params["preferred_maintenance_window"] = maint_window
if backup_window:
params["preferred_backup_window"] = backup_window
if backup_retention:
params["backup_retention_period"] = backup_retention
if multi_zone:
params["multi_az"] = multi_zone
if engine_version:
params["engine_version"] = engine_version
if upgrade:
params["auto_minor_version_upgrade"] = upgrade
if subnet:
params["db_subnet_group_name"] = subnet
if license_model:
params["license_model"] = license_model
if option_group:
params["option_group_name"] = option_group
if iops:
params["iops"] = iops
if security_groups:
params["security_groups"] = security_groups.split(',')
if vpc_security_groups:
groups_list = []
for x in vpc_security_groups:
groups_list.append(boto.rds.VPCSecurityGroupMembership(vpc_group=x))
params["vpc_security_groups"] = groups_list
if new_instance_name:
params["new_instance_id"] = new_instance_name
changed = True
if command in ['create', 'restore', 'facts']:
try:
result = conn.get_all_dbinstances(instance_name)[0]
changed = False
except boto.exception.BotoServerError, e:
try:
if command == 'create':
result = conn.create_dbinstance(instance_name, size, instance_type, username, password, **params)
if command == 'restore':
result = conn.restore_dbinstance_from_dbsnapshot(snapshot, instance_name, instance_type, **params)
if command == 'facts':
module.fail_json(msg = "DB Instance %s does not exist" % instance_name)
except boto.exception.BotoServerError, e:
module.fail_json(msg = e.error_message)
if command == 'snapshot':
try:
result = conn.get_all_dbsnapshots(snapshot)[0]
changed = False
except boto.exception.BotoServerError, e:
try:
result = conn.create_dbsnapshot(snapshot, instance_name)
except boto.exception.BotoServerError, e:
module.fail_json(msg = e.error_message)
if command == 'delete':
try:
result = conn.get_all_dbinstances(instance_name)[0]
if result.status == 'deleting':
module.exit_json(changed=False)
except boto.exception.BotoServerError, e:
module.exit_json(changed=False)
try:
if snapshot:
params["skip_final_snapshot"] = False
params["final_snapshot_id"] = snapshot
else:
params["skip_final_snapshot"] = True
result = conn.delete_dbinstance(instance_name, **params)
except boto.exception.BotoServerError, e:
module.fail_json(msg = e.error_message)
if command == 'replicate':
try:
if instance_type:
params["instance_class"] = instance_type
result = conn.create_dbinstance_read_replica(instance_name, source_instance, **params)
except boto.exception.BotoServerError, e:
module.fail_json(msg = e.error_message)
if command == 'modify':
try:
params["apply_immediately"] = apply_immediately
result = conn.modify_dbinstance(instance_name, **params)
except boto.exception.BotoServerError, e:
module.fail_json(msg = e.error_message)
if apply_immediately:
if new_instance_name:
# Wait until the new instance name is valid
found = 0
while found == 0:
instances = conn.get_all_dbinstances()
for i in instances:
if i.id == new_instance_name:
instance_name = new_instance_name
found = 1
if found == 0:
time.sleep(5)
# The name of the database has now changed, so we have
# to force result to contain the new instance, otherwise
# the call below to get_current_resource will fail since it
# will be looking for the old instance name.
result.id = new_instance_name
else:
# Wait for a few seconds since it takes a while for AWS
# to change the instance from 'available' to 'modifying'
time.sleep(5)
if command == 'promote':
try:
result = conn.promote_read_replica(instance_name, **params)
except boto.exception.BotoServerError, e:
module.fail_json(msg = e.error_message)
# If we're not waiting for a delete to complete then we're all done
# so just return
if command == 'delete' and not wait:
module.exit_json(changed=True)
try:
resource = get_current_resource(conn, result.id, command)
except boto.exception.BotoServerError, e:
module.fail_json(msg = e.error_message)
# Wait for the resource to be available if requested
if wait:
try:
wait_timeout = time.time() + wait_timeout
time.sleep(5)
while wait_timeout > time.time() and resource.status != 'available':
time.sleep(5)
if wait_timeout <= time.time():
module.fail_json(msg = "Timeout waiting for resource %s" % resource.id)
resource = get_current_resource(conn, result.id, command)
except boto.exception.BotoServerError, e:
# If we're waiting for an instance to be deleted then
# get_all_dbinstances will eventually throw a
# DBInstanceNotFound error.
if command == 'delete' and e.error_code == 'DBInstanceNotFound':
module.exit_json(changed=True)
else:
module.fail_json(msg = e.error_message)
# If we got here then pack up all the instance details to send
# back to ansible
if command == 'snapshot':
d = {
'id' : resource.id,
'create_time' : resource.snapshot_create_time,
'status' : resource.status,
'availability_zone' : resource.availability_zone,
'instance_id' : resource.instance_id,
'instance_created' : resource.instance_create_time,
}
try:
d["snapshot_type"] = resource.snapshot_type
d["iops"] = resource.iops
except AttributeError, e:
pass # needs boto >= 2.21.0
return module.exit_json(changed=changed, snapshot=d)
d = {
'id' : resource.id,
'create_time' : resource.create_time,
'status' : resource.status,
'availability_zone' : resource.availability_zone,
'backup_retention' : resource.backup_retention_period,
'backup_window' : resource.preferred_backup_window,
'maintenance_window' : resource.preferred_maintenance_window,
'multi_zone' : resource.multi_az,
'instance_type' : resource.instance_class,
'username' : resource.master_username,
'iops' : resource.iops
}
# Endpoint exists only if the instance is available
if resource.status == 'available' and command != 'snapshot':
d["endpoint"] = resource.endpoint[0]
d["port"] = resource.endpoint[1]
if resource.vpc_security_groups is not None:
d["vpc_security_groups"] = ','.join(x.vpc_group for x in resource.vpc_security_groups)
else:
d["vpc_security_groups"] = None
else:
d["endpoint"] = None
d["port"] = None
d["vpc_security_groups"] = None
# ReadReplicaSourceDBInstanceIdentifier may or may not exist
try:
d["replication_source"] = resource.ReadReplicaSourceDBInstanceIdentifier
except Exception, e:
d["replication_source"] = None
module.exit_json(changed=changed, instance=d)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()

@ -1,313 +0,0 @@
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: rds_param_group
version_added: "1.5"
short_description: manage RDS parameter groups
description:
- Creates, modifies, and deletes RDS parameter groups. This module has a dependency on python-boto >= 2.5.
options:
state:
description:
- Specifies whether the group should be present or absent.
required: true
default: present
aliases: []
choices: [ 'present' , 'absent' ]
name:
description:
- Database parameter group identifier.
required: true
default: null
aliases: []
description:
description:
- Database parameter group description. Only set when a new group is added.
required: false
default: null
aliases: []
engine:
description:
- The type of database for this group. Required for state=present.
required: false
default: null
aliases: []
choices: [ 'mysql5.1', 'mysql5.5', 'mysql5.6', 'oracle-ee-11.2', 'oracle-se-11.2', 'oracle-se1-11.2', 'postgres9.3', 'sqlserver-ee-10.5', 'sqlserver-ee-11.0', 'sqlserver-ex-10.5', 'sqlserver-ex-11.0', 'sqlserver-se-10.5', 'sqlserver-se-11.0', 'sqlserver-web-10.5', 'sqlserver-web-11.0']
immediate:
description:
- Whether to apply the changes immediately, or after the next reboot of any associated instances.
required: false
default: null
aliases: []
params:
description:
- Map of parameter names and values. Numeric values may be represented as K for kilo (1024), M for mega (1024^2), G for giga (1024^3), or T for tera (1024^4), and these values will be expanded into the appropriate number before being set in the parameter group.
required: false
default: null
aliases: []
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: true
default: null
aliases: [ 'aws_region', 'ec2_region' ]
aws_access_key:
description:
- AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_access_key', 'access_key' ]
aws_secret_key:
description:
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_secret_key', 'secret_key' ]
requirements: [ "boto" ]
author: Scott Anderson
'''
EXAMPLES = '''
# Add or change a parameter group, in this case setting auto_increment_increment to 42 * 1024
- rds_param_group: >
state=present
name=norwegian_blue
description=My Fancy Ex Parrot Group
engine=mysql5.6
params='{"auto_increment_increment": "42K"}'
# Remove a parameter group
- rds_param_group: >
state=absent
name=norwegian_blue
'''
import sys
import time
VALID_ENGINES = [
'mysql5.1',
'mysql5.5',
'mysql5.6',
'oracle-ee-11.2',
'oracle-se-11.2',
'oracle-se1-11.2',
'postgres9.3',
'sqlserver-ee-10.5',
'sqlserver-ee-11.0',
'sqlserver-ex-10.5',
'sqlserver-ex-11.0',
'sqlserver-se-10.5',
'sqlserver-se-11.0',
'sqlserver-web-10.5',
'sqlserver-web-11.0',
]
try:
import boto.rds
from boto.exception import BotoServerError
except ImportError:
print "failed=True msg='boto required for this module'"
sys.exit(1)
class NotModifiableError(StandardError):
def __init__(self, error_message, *args):
super(NotModifiableError, self).__init__(error_message, *args)
self.error_message = error_message
def __repr__(self):
return 'NotModifiableError: %s' % self.error_message
def __str__(self):
return 'NotModifiableError: %s' % self.error_message
INT_MODIFIERS = {
'K': 1024,
'M': pow(1024, 2),
'G': pow(1024, 3),
'T': pow(1024, 4),
}
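# e.g. a params value of "10M" expands to 10 * pow(1024, 2) = 10485760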
TRUE_VALUES = ('on', 'true', 'yes', '1',)
def set_parameter(param, value, immediate):
"""
    Allows setting parameters with 10M = 10 * 1024 * 1024 and so on.
"""
converted_value = value
if param.type == 'string':
converted_value = str(value)
elif param.type == 'integer':
if isinstance(value, basestring):
try:
for modifier in INT_MODIFIERS.keys():
if value.endswith(modifier):
converted_value = int(value[:-1]) * INT_MODIFIERS[modifier]
converted_value = int(converted_value)
except ValueError:
# may be based on a variable (ie. {foo*3/4}) so
# just pass it on through to boto
converted_value = str(value)
elif type(value) == bool:
converted_value = 1 if value else 0
else:
converted_value = int(value)
elif param.type == 'boolean':
if isinstance(value, basestring):
converted_value = value in TRUE_VALUES
else:
converted_value = bool(value)
param.value = converted_value
param.apply(immediate)
def modify_group(group, params, immediate=False):
""" Set all of the params in a group to the provided new params. Raises NotModifiableError if any of the
params to be changed are read only.
"""
changed = {}
new_params = dict(params)
for key in new_params.keys():
if group.has_key(key):
param = group[key]
new_value = new_params[key]
try:
old_value = param.value
except ValueError:
# some versions of boto have problems with retrieving
# integer values from params that may have their value
# based on a variable (ie. {foo*3/4}), so grab it in a
# way that bypasses the property functions
old_value = param._value
if old_value != new_value:
if not param.is_modifiable:
raise NotModifiableError('Parameter %s is not modifiable.' % key)
changed[key] = {'old': param.value, 'new': new_value}
set_parameter(param, new_value, immediate)
del new_params[key]
return changed, new_params
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state = dict(required=True, choices=['present', 'absent']),
name = dict(required=True),
engine = dict(required=False, choices=VALID_ENGINES),
description = dict(required=False),
params = dict(required=False, aliases=['parameters'], type='dict'),
immediate = dict(required=False, type='bool'),
)
)
module = AnsibleModule(argument_spec=argument_spec)
state = module.params.get('state')
group_name = module.params.get('name').lower()
group_engine = module.params.get('engine')
group_description = module.params.get('description')
group_params = module.params.get('params') or {}
immediate = module.params.get('immediate') or False
if state == 'present':
for required in ['name', 'description', 'engine', 'params']:
if not module.params.get(required):
module.fail_json(msg = str("Parameter %s required for state='present'" % required))
else:
for not_allowed in ['description', 'engine', 'params']:
if module.params.get(not_allowed):
module.fail_json(msg = str("Parameter %s not allowed for state='absent'" % not_allowed))
# Retrieve any AWS settings from the environment.
ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module)
if not region:
module.fail_json(msg = str("region not specified and unable to determine region from EC2_REGION."))
try:
conn = boto.rds.connect_to_region(region, aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_key)
except boto.exception.BotoServerError, e:
module.fail_json(msg = e.error_message)
group_was_added = False
try:
changed = False
try:
all_groups = conn.get_all_dbparameter_groups(group_name, max_records=100)
exists = len(all_groups) > 0
except BotoServerError, e:
if e.error_code != 'DBParameterGroupNotFound':
module.fail_json(msg = e.error_message)
exists = False
if state == 'absent':
if exists:
conn.delete_parameter_group(group_name)
changed = True
else:
changed = {}
if not exists:
new_group = conn.create_parameter_group(group_name, engine=group_engine, description=group_description)
group_was_added = True
# If a "Marker" is present, this group has more attributes remaining to check. Get the next batch, but only
# if there are parameters left to set.
marker = None
while len(group_params):
next_group = conn.get_all_dbparameters(group_name, marker=marker)
changed_params, group_params = modify_group(next_group, group_params, immediate)
changed.update(changed_params)
if hasattr(next_group, 'Marker'):
marker = next_group.Marker
else:
break
except BotoServerError, e:
module.fail_json(msg = e.error_message)
except NotModifiableError, e:
msg = e.error_message
if group_was_added:
msg = '%s The group "%s" was added first.' % (msg, group_name)
module.fail_json(msg=msg)
module.exit_json(changed=changed)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()

@ -1,166 +0,0 @@
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: rds_subnet_group
version_added: "1.5"
short_description: manage RDS database subnet groups
description:
- Creates, modifies, and deletes RDS database subnet groups. This module has a dependency on python-boto >= 2.5.
options:
state:
description:
      - Specifies whether the subnet group should be present or absent.
required: true
default: present
aliases: []
choices: [ 'present' , 'absent' ]
name:
description:
- Database subnet group identifier.
required: true
default: null
aliases: []
description:
description:
- Database subnet group description. Only set when a new group is added.
required: false
default: null
aliases: []
subnets:
description:
- List of subnet IDs that make up the database subnet group.
required: false
default: null
aliases: []
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: true
default: null
aliases: [ 'aws_region', 'ec2_region' ]
aws_access_key:
description:
- AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_access_key', 'access_key' ]
aws_secret_key:
description:
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_secret_key', 'secret_key' ]
requirements: [ "boto" ]
author: Scott Anderson
'''
EXAMPLES = '''
# Add or change a subnet group
- local_action:
module: rds_subnet_group
state: present
name: norwegian-blue
description: My Fancy Ex Parrot Subnet Group
subnets:
- subnet-aaaaaaaa
- subnet-bbbbbbbb
# Remove a subnet group
- rds_subnet_group: >
state=absent
name=norwegian-blue
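# A hedged, illustrative addition (not in the original docs): change the
# subnets of an existing group by re-running with an updated subnet list;
# the subnet IDs shown are hypothetical.
- local_action:
module: rds_subnet_group
state: present
name: norwegian-blue
description: My Fancy Ex Parrot Subnet Group
subnets:
- subnet-aaaaaaaa
- subnet-cccccccc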
'''
import sys
import time
try:
import boto.rds
from boto.exception import BotoServerError
except ImportError:
print "failed=True msg='boto required for this module'"
sys.exit(1)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state = dict(required=True, choices=['present', 'absent']),
name = dict(required=True),
description = dict(required=False),
subnets = dict(required=False, type='list'),
)
)
module = AnsibleModule(argument_spec=argument_spec)
state = module.params.get('state')
group_name = module.params.get('name').lower()
group_description = module.params.get('description')
group_subnets = module.params.get('subnets') or []
if state == 'present':
for required in ['name', 'description', 'subnets']:
if not module.params.get(required):
module.fail_json(msg = str("Parameter %s required for state='present'" % required))
else:
for not_allowed in ['description', 'subnets']:
if module.params.get(not_allowed):
module.fail_json(msg = str("Parameter %s not allowed for state='absent'" % not_allowed))
# Retrieve any AWS settings from the environment.
ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module)
if not region:
module.fail_json(msg = str("region not specified and unable to determine region from EC2_REGION."))
try:
conn = boto.rds.connect_to_region(region, aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_key)
except boto.exception.BotoServerError, e:
module.fail_json(msg = e.error_message)
try:
changed = False
exists = False
try:
matching_groups = conn.get_all_db_subnet_groups(group_name, max_records=100)
exists = len(matching_groups) > 0
except BotoServerError, e:
if e.error_code != 'DBSubnetGroupNotFoundFault':
module.fail_json(msg = e.error_message)
if state == 'absent':
if exists:
conn.delete_db_subnet_group(group_name)
changed = True
else:
if not exists:
new_group = conn.create_db_subnet_group(group_name, desc=group_description, subnet_ids=group_subnets)
changed = True
else:
changed_group = conn.modify_db_subnet_group(group_name, description=group_description, subnet_ids=group_subnets)
changed = True
except BotoServerError, e:
module.fail_json(msg = e.error_message)
module.exit_json(changed=changed)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()

@ -1,281 +0,0 @@
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: route53
version_added: "1.3"
short_description: add or delete entries in Amazon's Route53 DNS service
description:
- Creates and deletes DNS records in Amazon's Route53 service
options:
command:
description:
- Specifies the action to take.
required: true
default: null
aliases: []
choices: [ 'get', 'create', 'delete' ]
zone:
description:
- The DNS zone to modify
required: true
default: null
aliases: []
record:
description:
- The full DNS record to create or delete
required: true
default: null
aliases: []
ttl:
description:
- The TTL to give the new record
required: false
default: 3600 (one hour)
aliases: []
type:
description:
- The type of DNS record to create
required: true
default: null
aliases: []
choices: [ 'A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'NS' ]
value:
description:
- The new value when creating a DNS record. Multiple comma-spaced values are allowed. When deleting a record all values for the record must be specified or Route53 will not delete it.
required: false
default: null
aliases: []
aws_secret_key:
description:
- AWS secret key.
required: false
default: null
aliases: ['ec2_secret_key', 'secret_key']
aws_access_key:
description:
- AWS access key.
required: false
default: null
aliases: ['ec2_access_key', 'access_key']
overwrite:
description:
- Whether an existing record should be overwritten on create if values do not match
required: false
default: null
aliases: []
retry_interval:
description:
- In the case that route53 is still servicing a prior request, this module will wait and try again after this many seconds. If you have many domain names, the default of 500 seconds may be too long.
required: false
default: 500
aliases: []
requirements: [ "boto" ]
author: Bruce Pennypacker
'''
EXAMPLES = '''
# Add new.foo.com as an A record with 3 IPs
- route53: >
command=create
zone=foo.com
record=new.foo.com
type=A
ttl=7200
value=1.1.1.1,2.2.2.2,3.3.3.3
# Retrieve the details for new.foo.com
- route53: >
command=get
zone=foo.com
record=new.foo.com
type=A
register: rec
# Delete new.foo.com A record using the results from the get command
- route53: >
command=delete
zone=foo.com
record={{ rec.set.record }}
type={{ rec.set.type }}
value={{ rec.set.value }}
# Add an AAAA record. Note that because there are colons in the value
# that the entire parameter list must be quoted:
- route53: >
command=create
zone=foo.com
record=localhost.foo.com
type=AAAA
ttl=7200
value="::1"
# Add a TXT record. Note that TXT and SPF records must be surrounded
# by quotes when sent to Route 53:
- route53: >
command=create
zone=foo.com
record=localhost.foo.com
type=TXT
ttl=7200
value="\"bar\""
'''
import sys
import time
try:
import boto
from boto import route53
from boto.route53.record import ResourceRecordSets
except ImportError:
print "failed=True msg='boto required for this module'"
sys.exit(1)
def commit(changes, retry_interval):
"""Commit changes, but retry PriorRequestNotComplete errors."""
retry = 10
while True:
try:
retry -= 1
return changes.commit()
except boto.route53.exception.DNSServerError, e:
code = e.body.split("<Code>")[1]
code = code.split("</Code>")[0]
if code != 'PriorRequestNotComplete' or retry < 0:
raise e
time.sleep(retry_interval)
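# A hedged usage sketch for commit() (illustrative, not from the original
# module; the hosted zone id and record are hypothetical, and conn is
# assumed to be an authenticated Route53Connection):
# changes = ResourceRecordSets(conn, 'Z3EXAMPLE')
# change = changes.add_change('CREATE', 'new.foo.com.', 'A', 3600)
# change.add_value('1.1.1.1')
# commit(changes, retry_interval=30)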
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
command = dict(choices=['get', 'create', 'delete'], required=True),
zone = dict(required=True),
record = dict(required=True),
ttl = dict(required=False, default=3600),
type = dict(choices=['A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'NS'], required=True),
value = dict(required=False),
overwrite = dict(required=False, type='bool'),
retry_interval = dict(required=False, default=500)
)
)
module = AnsibleModule(argument_spec=argument_spec)
command_in = module.params.get('command')
zone_in = module.params.get('zone')
ttl_in = module.params.get('ttl')
record_in = module.params.get('record')
type_in = module.params.get('type')
value_in = module.params.get('value')
retry_interval_in = module.params.get('retry_interval')
ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module)
value_list = ()
if type(value_in) is str:
if value_in:
value_list = sorted(value_in.split(','))
elif type(value_in) is list:
value_list = sorted(value_in)
if zone_in[-1:] != '.':
zone_in += "."
if record_in[-1:] != '.':
record_in += "."
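# e.g. zone 'foo.com' and record 'new.foo.com' are normalized to 'foo.com.'
# and 'new.foo.com.' to match Route53's canonical trailing-dot form.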
if command_in == 'create' or command_in == 'delete':
if not value_in:
module.fail_json(msg = "parameter 'value' required for create/delete")
# connect to the route53 endpoint
try:
conn = boto.route53.connection.Route53Connection(aws_access_key, aws_secret_key)
except boto.exception.BotoServerError, e:
module.fail_json(msg = e.error_message)
# Get all the existing hosted zones and save their ID's
zones = {}
results = conn.get_all_hosted_zones()
for r53zone in results['ListHostedZonesResponse']['HostedZones']:
zone_id = r53zone['Id'].replace('/hostedzone/', '')
zones[r53zone['Name']] = zone_id
# Verify that the requested zone is already defined in Route53
if not zone_in in zones:
errmsg = "Zone %s does not exist in Route53" % zone_in
module.fail_json(msg = errmsg)
record = {}
found_record = False
sets = conn.get_all_rrsets(zones[zone_in])
for rset in sets:
# Due to a bug in either AWS or Boto, "special" characters are returned as octals, preventing round
# tripping of things like * and @.
decoded_name = rset.name.replace(r'\052', '*')
decoded_name = decoded_name.replace(r'\100', '@')
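# e.g. boto may return '\052.foo.com.' for '*.foo.com.' and '\100.foo.com.'
# for '@.foo.com.' (illustrative names).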
if rset.type == type_in and decoded_name == record_in:
found_record = True
record['zone'] = zone_in
record['type'] = rset.type
record['record'] = decoded_name
record['ttl'] = rset.ttl
record['value'] = ','.join(sorted(rset.resource_records))
record['values'] = sorted(rset.resource_records)
if value_list == sorted(rset.resource_records) and int(record['ttl']) == int(ttl_in) and command_in == 'create':
module.exit_json(changed=False)
if command_in == 'get':
module.exit_json(changed=False, set=record)
if command_in == 'delete' and not found_record:
module.exit_json(changed=False)
changes = ResourceRecordSets(conn, zones[zone_in])
if command_in == 'create' and found_record:
if not module.params['overwrite']:
module.fail_json(msg = "Record already exists with different value. Set 'overwrite' to replace it")
else:
change = changes.add_change("DELETE", record_in, type_in, record['ttl'])
for v in record['values']:
change.add_value(v)
if command_in == 'create' or command_in == 'delete':
change = changes.add_change(command_in.upper(), record_in, type_in, ttl_in)
for v in value_list:
change.add_value(v)
try:
result = commit(changes, retry_interval_in)
except boto.route53.exception.DNSServerError, e:
txt = e.body.split("<Message>")[1]
txt = txt.split("</Message>")[0]
module.fail_json(msg = txt)
module.exit_json(changed=True)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()

@ -1,514 +0,0 @@
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: s3
short_description: manage objects in S3 (PUT, GET, bucket create/delete, download URLs).
description:
- This module allows the user to dictate the presence of a given file in an S3 bucket. If or once the key (file) exists in the bucket, it returns a time-expired download URL. This module has a dependency on python-boto.
version_added: "1.1"
options:
bucket:
description:
- Bucket name.
required: true
default: null
aliases: []
object:
description:
- Keyname of the object inside the bucket. Can be used to create "virtual directories", see examples.
required: false
default: null
aliases: []
version_added: "1.3"
src:
description:
- The source file path when performing a PUT operation.
required: false
default: null
aliases: []
version_added: "1.3"
dest:
description:
- The destination file path when downloading an object/key with a GET operation.
required: false
aliases: []
version_added: "1.3"
overwrite:
description:
- Force overwrite either locally on the filesystem or remotely with the object/key. Used with PUT and GET operations.
required: false
default: true
version_added: "1.2"
mode:
description:
- Switches the module behaviour between put (upload), get (download), geturl (return download URL, Ansible 1.3+), getstr (download object as string, 1.3+), create (bucket) and delete (bucket).
required: true
default: null
aliases: []
expiration:
description:
- Time limit (in seconds) for the URL generated and returned by S3/Walrus when performing a mode=put or mode=geturl operation.
required: false
default: 600
aliases: []
s3_url:
description:
- "S3 URL endpoint. If not specified then the S3_URL environment variable is used, if that variable is defined. Ansible tries to guess if fakes3 (https://github.com/jubos/fake-s3) or Eucalyptus Walrus (https://github.com/eucalyptus/eucalyptus/wiki/Walrus) is used and configure connection accordingly. Current heuristic is: everything with scheme fakes3:// is fakes3, everything else not ending with amazonaws.com is Walrus."
default: null
aliases: [ S3_URL ]
aws_secret_key:
description:
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
required: false
default: null
aliases: ['ec2_secret_key', 'secret_key']
aws_access_key:
description:
- AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_access_key', 'access_key' ]
metadata:
description:
- Metadata for PUT operation, as a dictionary of 'key=value' and 'key=value,key=value'.
required: false
default: null
version_added: "1.6"
region:
description:
- "AWS region to create the bucket in. If not set then the value of the EC2_REGION and AWS_REGION environment variables are checked, followed by the aws_region and ec2_region settings in the Boto config file. If none of those are set the region defaults to the S3 Location: US Standard. Prior to ansible 1.8 this parameter could be specified but had no effect."
required: false
default: null
version_added: "1.8"
requirements: [ "boto" ]
author: Lester Wade, Ralph Tice
'''
EXAMPLES = '''
# Simple PUT operation
- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put
# Simple GET operation
- s3: bucket=mybucket object=/my/desired/key.txt dest=/usr/local/myfile.txt mode=get
# GET/download and overwrite local file (trust remote)
- s3: bucket=mybucket object=/my/desired/key.txt dest=/usr/local/myfile.txt mode=get
# GET/download and do not overwrite local file (trust local)
- s3: bucket=mybucket object=/my/desired/key.txt dest=/usr/local/myfile.txt mode=get force=false
# PUT/upload and overwrite remote file (trust local)
- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put
# PUT/upload with metadata
- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put metadata='Content-Encoding=gzip'
# PUT/upload with multiple metadata
- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put metadata='Content-Encoding=gzip,Cache-Control=no-cache'
# PUT/upload and do not overwrite remote file (trust remote)
- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put force=false
# Download an object as a string to use elsewhere in your playbook
- s3: bucket=mybucket object=/my/desired/key.txt mode=getstr
# Create an empty bucket
- s3: bucket=mybucket mode=create
# Create a bucket with key as directory
- s3: bucket=mybucket object=/my/directory/path mode=create
# Create an empty bucket in the EU region
- s3: bucket=mybucket mode=create region=eu-west-1
# Delete a bucket and all contents
- s3: bucket=mybucket mode=delete
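# A hedged, illustrative addition (not in the original docs): return a
# time-expired download URL for an existing object via mode=geturl.
- s3: bucket=mybucket object=/my/desired/key.txt mode=geturl expiration=60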
'''
import sys
import os
import urlparse
import hashlib
try:
import boto
from boto.s3.connection import Location
except ImportError:
print "failed=True msg='boto required for this module'"
sys.exit(1)
def key_check(module, s3, bucket, obj):
try:
bucket = s3.lookup(bucket)
key_check = bucket.get_key(obj)
except s3.provider.storage_response_error, e:
module.fail_json(msg= str(e))
if key_check:
return True
else:
return False
def keysum(module, s3, bucket, obj):
bucket = s3.lookup(bucket)
key_check = bucket.get_key(obj)
if not key_check:
return None
md5_remote = key_check.etag[1:-1]
etag_multipart = '-' in md5_remote # Check for multipart, etag is not md5
if etag_multipart is True:
module.fail_json(msg="Files uploaded with multipart of s3 are not supported with checksum, unable to compute checksum.")
return md5_remote
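# Note (illustrative): for single-part uploads S3's ETag is the hex MD5 of the
# object body, e.g. '5d41402abc4b2a76b9719d911017c592' for the string 'hello';
# multipart ETags contain a '-' and are not plain MD5s, hence the check above.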
def bucket_check(module, s3, bucket):
try:
result = s3.lookup(bucket)
except s3.provider.storage_response_error, e:
module.fail_json(msg= str(e))
if result:
return True
else:
return False
def create_bucket(module, s3, bucket, location=Location.DEFAULT):
try:
bucket = s3.create_bucket(bucket, location=location)
except s3.provider.storage_response_error, e:
module.fail_json(msg= str(e))
if bucket:
return True
def delete_bucket(module, s3, bucket):
try:
bucket = s3.lookup(bucket)
bucket_contents = bucket.list()
bucket.delete_keys([key.name for key in bucket_contents])
bucket.delete()
return True
except s3.provider.storage_response_error, e:
module.fail_json(msg= str(e))
def delete_key(module, s3, bucket, obj):
try:
bucket = s3.lookup(bucket)
bucket.delete_key(obj)
module.exit_json(msg="Object deleted from bucket %s"%bucket, changed=True)
except s3.provider.storage_response_error, e:
module.fail_json(msg= str(e))
def create_dirkey(module, s3, bucket, obj):
try:
bucket = s3.lookup(bucket)
key = bucket.new_key(obj)
key.set_contents_from_string('')
module.exit_json(msg="Virtual directory %s created in bucket %s" % (obj, bucket.name), changed=True)
except s3.provider.storage_response_error, e:
module.fail_json(msg= str(e))
def upload_file_check(module, src):
if os.path.exists(src):
file_exists = True
else:
file_exists = False
if os.path.isdir(src):
module.fail_json(msg="Specifying a directory is not a valid source for upload.", failed=True)
return file_exists
def path_check(path):
if os.path.exists(path):
return True
else:
return False
def upload_s3file(module, s3, bucket, obj, src, expiry, metadata):
try:
bucket = s3.lookup(bucket)
key = bucket.new_key(obj)
if metadata:
for meta_key in metadata.keys():
key.set_metadata(meta_key, metadata[meta_key])
key.set_contents_from_filename(src)
url = key.generate_url(expiry)
module.exit_json(msg="PUT operation complete", url=url, changed=True)
except s3.provider.storage_copy_error, e:
module.fail_json(msg= str(e))
def download_s3file(module, s3, bucket, obj, dest):
try:
bucket = s3.lookup(bucket)
key = bucket.lookup(obj)
key.get_contents_to_filename(dest)
module.exit_json(msg="GET operation complete", changed=True)
except s3.provider.storage_copy_error, e:
module.fail_json(msg= str(e))
def download_s3str(module, s3, bucket, obj):
try:
bucket = s3.lookup(bucket)
key = bucket.lookup(obj)
contents = key.get_contents_as_string()
module.exit_json(msg="GET operation complete", contents=contents, changed=True)
except s3.provider.storage_copy_error, e:
module.fail_json(msg= str(e))
def get_download_url(module, s3, bucket, obj, expiry, changed=True):
try:
bucket = s3.lookup(bucket)
key = bucket.lookup(obj)
url = key.generate_url(expiry)
module.exit_json(msg="Download url:", url=url, expiry=expiry, changed=changed)
except s3.provider.storage_response_error, e:
module.fail_json(msg= str(e))
def is_fakes3(s3_url):
""" Return True if s3_url has scheme fakes3:// """
if s3_url is not None:
return urlparse.urlparse(s3_url).scheme == 'fakes3'
else:
return False
def is_walrus(s3_url):
""" Return True if it's Walrus endpoint, not S3
We assume anything other than *.amazonaws.com is Walrus"""
if s3_url is not None:
o = urlparse.urlparse(s3_url)
return not o.hostname.endswith('amazonaws.com')
else:
return False
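# Hedged examples of the endpoint heuristics above (illustrative URLs):
# is_fakes3('fakes3://localhost:4567') -> True
# is_walrus('https://walrus.example.com') -> True
# is_walrus('https://s3.amazonaws.com') -> False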
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
bucket = dict(required=True),
object = dict(),
src = dict(),
dest = dict(default=None),
mode = dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr'], required=True),
expiry = dict(default=600, aliases=['expiration']),
s3_url = dict(aliases=['S3_URL']),
overwrite = dict(aliases=['force'], default=True, type='bool'),
metadata = dict(type='dict'),
),
)
module = AnsibleModule(argument_spec=argument_spec)
bucket = module.params.get('bucket')
obj = module.params.get('object')
src = module.params.get('src')
if module.params.get('dest'):
dest = os.path.expanduser(module.params.get('dest'))
mode = module.params.get('mode')
expiry = int(module.params['expiry'])
s3_url = module.params.get('s3_url')
overwrite = module.params.get('overwrite')
metadata = module.params.get('metadata')
ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module)
if region in ('us-east-1', '', None):
# S3ism for the US Standard region
location = Location.DEFAULT
else:
# Boto uses symbolic names for locations but region strings will
# actually work fine for everything except us-east-1 (US Standard)
location = region
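# e.g. region 'eu-west-1' is passed through as the location string, while
# us-east-1 (or no region) maps to boto's Location.DEFAULT ('').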
if module.params.get('object'):
obj = os.path.expanduser(module.params['object'])
# allow eucarc environment variables to be used if ansible vars aren't set
if not s3_url and 'S3_URL' in os.environ:
s3_url = os.environ['S3_URL']
# Look at s3_url and tweak connection settings
# if connecting to Walrus or fakes3
if is_fakes3(s3_url):
try:
fakes3 = urlparse.urlparse(s3_url)
from boto.s3.connection import OrdinaryCallingFormat
s3 = boto.connect_s3(
aws_access_key,
aws_secret_key,
is_secure=False,
host=fakes3.hostname,
port=fakes3.port,
calling_format=OrdinaryCallingFormat())
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg = str(e))
elif is_walrus(s3_url):
try:
walrus = urlparse.urlparse(s3_url).hostname
s3 = boto.connect_walrus(walrus, aws_access_key, aws_secret_key)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg = str(e))
else:
try:
s3 = boto.connect_s3(aws_access_key, aws_secret_key)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg = str(e))
# If our mode is a GET operation (download), go through the procedure as appropriate ...
if mode == 'get':
# First, we check to see if the bucket exists, we get "bucket" returned.
bucketrtn = bucket_check(module, s3, bucket)
if bucketrtn is False:
module.fail_json(msg="Target bucket cannot be found", failed=True)
# Next, check whether the key (object) exists in the bucket.
keyrtn = key_check(module, s3, bucket, obj)
if keyrtn is False:
module.fail_json(msg="Target key cannot be found", failed=True)
# If the destination path doesn't exist, there is no need for the md5sum/ETag check, so just download.
pathrtn = path_check(dest)
if pathrtn is False:
download_s3file(module, s3, bucket, obj, dest)
# Compare the remote MD5 sum of the object with the local dest md5sum, if it already exists.
if pathrtn is True:
md5_remote = keysum(module, s3, bucket, obj)
md5_local = hashlib.md5(open(dest, 'rb').read()).hexdigest()
if md5_local == md5_remote:
sum_matches = True
if overwrite is True:
download_s3file(module, s3, bucket, obj, dest)
else:
module.exit_json(msg="Local and remote object are identical, ignoring. Use overwrite parameter to force.", changed=False)
else:
sum_matches = False
if overwrite is True:
download_s3file(module, s3, bucket, obj, dest)
else:
module.fail_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force download.", failed=True)
# If sum_matches is True and overwrite is not enabled, exit with a helpful message.
if sum_matches is True and overwrite is False:
module.exit_json(msg="Local and remote object are identical, ignoring. Use overwrite parameter to force.", changed=False)
# At this point explicitly define the overwrite condition.
if sum_matches is True and pathrtn is True and overwrite is True:
download_s3file(module, s3, bucket, obj, dest)
# If the checksum does not match but the destination exists, the overwrite
# branch above has already downloaded or failed as appropriate.
# If our mode is a PUT operation (upload), go through the procedure as appropriate ...
if mode == 'put':
# Use this snippet to debug through conditionals:
# module.exit_json(msg="Bucket return %s"%bucketrtn)
# sys.exit(0)
# Let's check the src path.
pathrtn = path_check(src)
if pathrtn is False:
module.fail_json(msg="Local object for PUT does not exist", failed=True)
# Let's check to see if the bucket exists to get ground truth.
bucketrtn = bucket_check(module, s3, bucket)
if bucketrtn is True:
keyrtn = key_check(module, s3, bucket, obj)
# Let's check key state. Does it exist, and if it does, compute the etag md5sum.
if bucketrtn is True and keyrtn is True:
md5_remote = keysum(module, s3, bucket, obj)
md5_local = hashlib.md5(open(src, 'rb').read()).hexdigest()
if md5_local == md5_remote:
sum_matches = True
if overwrite is True:
upload_s3file(module, s3, bucket, obj, src, expiry, metadata)
else:
get_download_url(module, s3, bucket, obj, expiry, changed=False)
else:
sum_matches = False
if overwrite is True:
upload_s3file(module, s3, bucket, obj, src, expiry, metadata)
else:
module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force upload.", failed=True)
# If neither exist (based on bucket existence), we can create both.
if bucketrtn is False and pathrtn is True:
create_bucket(module, s3, bucket, location)
upload_s3file(module, s3, bucket, obj, src, expiry, metadata)
# If bucket exists but key doesn't, just upload.
if bucketrtn is True and pathrtn is True and keyrtn is False:
upload_s3file(module, s3, bucket, obj, src, expiry, metadata)
# Support for deleting an object if we have both params.
if mode == 'delete':
if bucket:
bucketrtn = bucket_check(module, s3, bucket)
if bucketrtn is True:
deletertn = delete_bucket(module, s3, bucket)
if deletertn is True:
module.exit_json(msg="Bucket %s and all keys have been deleted."%bucket, changed=True)
else:
module.fail_json(msg="Bucket does not exist.", changed=False)
else:
module.fail_json(msg="Bucket parameter is required.", failed=True)
# Bucket creation, plus "virtual directory" keys: the AWS console creates
# directory structure with an empty key, and create_dirkey() does the same here.
if mode == 'create':
if bucket and not obj:
bucketrtn = bucket_check(module, s3, bucket)
if bucketrtn is True:
module.exit_json(msg="Bucket already exists.", changed=False)
else:
module.exit_json(msg="Bucket created successfully", changed=create_bucket(module, s3, bucket, location))
if bucket and obj:
bucketrtn = bucket_check(module, s3, bucket)
if obj.endswith('/'):
dirobj = obj
else:
dirobj = obj + "/"
if bucketrtn is True:
keyrtn = key_check(module, s3, bucket, dirobj)
if keyrtn is True:
module.exit_json(msg="Bucket %s and key %s already exists."% (bucket, obj), changed=False)
else:
create_dirkey(module, s3, bucket, dirobj)
if bucketrtn is False:
created = create_bucket(module, s3, bucket, location)
create_dirkey(module, s3, bucket, dirobj)
# Support for grabbing the time-expired URL for an object in S3/Walrus.
if mode == 'geturl':
if bucket and obj:
bucketrtn = bucket_check(module, s3, bucket)
if bucketrtn is False:
module.fail_json(msg="Bucket %s does not exist."%bucket, failed=True)
else:
keyrtn = key_check(module, s3, bucket, obj)
if keyrtn is True:
get_download_url(module, s3, bucket, obj, expiry)
else:
module.fail_json(msg="Key %s does not exist."%obj, failed=True)
else:
module.fail_json(msg="Bucket and Object parameters must be set", failed=True)
if mode == 'getstr':
if bucket and obj:
bucketrtn = bucket_check(module, s3, bucket)
if bucketrtn is False:
module.fail_json(msg="Bucket %s does not exist."%bucket, failed=True)
else:
keyrtn = key_check(module, s3, bucket, obj)
if keyrtn is True:
download_s3str(module, s3, bucket, obj)
else:
module.fail_json(msg="Key %s does not exist."%obj, failed=True)
module.exit_json(failed=False)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
