Migrated to ansible.amazon

pull/68117/head
Ansible Core Team · 5 years ago
parent ab5942a760
commit 42b02d1be2

--- a/lib/ansible/module_utils/aws/acm.py
+++ /dev/null
@@ -1,212 +0,0 @@
# -*- coding: utf-8 -*-
#
# Copyright (c) 2019 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
#
# Author:
# - Matthew Davis <Matthew.Davis.2@team.telstra.com>
# on behalf of Telstra Corporation Limited
#
# Common functionality to be used by the modules:
# - acm
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
"""
Common AWS Certificate Manager (ACM) facts shared between modules
"""
import traceback
from ansible.module_utils.ec2 import get_aws_connection_info, boto3_conn
from ansible.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry, HAS_BOTO3, boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list
from ansible.module_utils._text import to_bytes
try:
import botocore
from botocore.exceptions import BotoCoreError, ClientError
except ImportError:
pass # caught by imported HAS_BOTO3
class ACMServiceManager(object):
"""Handles ACM Facts Services"""
def __init__(self, module):
self.module = module
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
self.client = module.client('acm')
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0, catch_extra_error_codes=['RequestInProgressException'])
def delete_certificate_with_backoff(self, client, arn):
client.delete_certificate(CertificateArn=arn)
def delete_certificate(self, client, module, arn):
module.debug("Attempting to delete certificate %s" % arn)
try:
self.delete_certificate_with_backoff(client, arn)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Couldn't delete certificate %s" % arn)
module.debug("Successfully deleted certificate %s" % arn)
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0, catch_extra_error_codes=['RequestInProgressException'])
def list_certificates_with_backoff(self, client, statuses=None):
paginator = client.get_paginator('list_certificates')
kwargs = dict()
if statuses:
kwargs['CertificateStatuses'] = statuses
return paginator.paginate(**kwargs).build_full_result()['CertificateSummaryList']
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0, catch_extra_error_codes=['ResourceNotFoundException', 'RequestInProgressException'])
def get_certificate_with_backoff(self, client, certificate_arn):
response = client.get_certificate(CertificateArn=certificate_arn)
# strip out response metadata
return {'Certificate': response['Certificate'],
'CertificateChain': response['CertificateChain']}
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0, catch_extra_error_codes=['ResourceNotFoundException', 'RequestInProgressException'])
def describe_certificate_with_backoff(self, client, certificate_arn):
return client.describe_certificate(CertificateArn=certificate_arn)['Certificate']
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0, catch_extra_error_codes=['ResourceNotFoundException', 'RequestInProgressException'])
def list_certificate_tags_with_backoff(self, client, certificate_arn):
return client.list_tags_for_certificate(CertificateArn=certificate_arn)['Tags']
# Returns a list of certificates
# if domain_name is specified, returns only certificates with that domain
# if an ARN is specified, returns only that certificate
# only_tags is a dict, e.g. {'key':'value'}. If specified this function will return
# only certificates which contain all those tags (key exists, value matches).
def get_certificates(self, client, module, domain_name=None, statuses=None, arn=None, only_tags=None):
try:
all_certificates = self.list_certificates_with_backoff(client=client, statuses=statuses)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Couldn't obtain certificates")
if domain_name:
certificates = [cert for cert in all_certificates
if cert['DomainName'] == domain_name]
else:
certificates = all_certificates
if arn:
# still return a list, not just one item
certificates = [c for c in certificates if c['CertificateArn'] == arn]
results = []
for certificate in certificates:
try:
cert_data = self.describe_certificate_with_backoff(client, certificate['CertificateArn'])
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Couldn't obtain certificate metadata for domain %s" % certificate['DomainName'])
# in some states, ACM resources do not have a corresponding cert
if cert_data['Status'] not in ['PENDING_VALIDATION', 'VALIDATION_TIMED_OUT', 'FAILED']:
try:
cert_data.update(self.get_certificate_with_backoff(client, certificate['CertificateArn']))
except (BotoCoreError, ClientError, KeyError) as e:
module.fail_json_aws(e, msg="Couldn't obtain certificate data for domain %s" % certificate['DomainName'])
cert_data = camel_dict_to_snake_dict(cert_data)
try:
tags = self.list_certificate_tags_with_backoff(client, certificate['CertificateArn'])
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Couldn't obtain tags for domain %s" % certificate['DomainName'])
cert_data['tags'] = boto3_tag_list_to_ansible_dict(tags)
results.append(cert_data)
if only_tags:
for tag_key in only_tags:
try:
results = [c for c in results if ('tags' in c) and (tag_key in c['tags']) and (c['tags'][tag_key] == only_tags[tag_key])]
except (TypeError, AttributeError) as e:
for c in results:
if 'tags' not in c:
module.debug("cert is %s" % str(c))
module.fail_json(msg="ACM tag filtering err", exception=e)
return results
# returns the domain name of a certificate (encoded in the public cert)
# for a given ARN
# A cert with that ARN must already exist
def get_domain_of_cert(self, client, module, arn):
if arn is None:
module.fail_json(msg="Internal error with ACM domain fetching, no certificate ARN specified")
try:
cert_data = self.describe_certificate_with_backoff(client=client, certificate_arn=arn)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Couldn't obtain certificate data for arn %s" % arn)
return cert_data['DomainName']
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def import_certificate_with_backoff(self, client, certificate, private_key, certificate_chain, arn):
if certificate_chain:
if arn:
ret = client.import_certificate(Certificate=to_bytes(certificate),
PrivateKey=to_bytes(private_key),
CertificateChain=to_bytes(certificate_chain),
CertificateArn=arn)
else:
ret = client.import_certificate(Certificate=to_bytes(certificate),
PrivateKey=to_bytes(private_key),
CertificateChain=to_bytes(certificate_chain))
else:
if arn:
ret = client.import_certificate(Certificate=to_bytes(certificate),
PrivateKey=to_bytes(private_key),
CertificateArn=arn)
else:
ret = client.import_certificate(Certificate=to_bytes(certificate),
PrivateKey=to_bytes(private_key))
return ret['CertificateArn']
# Tags are a normal Ansible style dict
# {'Key':'Value'}
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0, catch_extra_error_codes=['ResourceNotFoundException', 'RequestInProgressException'])
def tag_certificate_with_backoff(self, client, arn, tags):
aws_tags = ansible_dict_to_boto3_tag_list(tags)
client.add_tags_to_certificate(CertificateArn=arn, Tags=aws_tags)
def import_certificate(self, client, module, certificate, private_key, arn=None, certificate_chain=None, tags=None):
original_arn = arn
# upload cert
try:
arn = self.import_certificate_with_backoff(client, certificate, private_key, certificate_chain, arn)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Couldn't upload new certificate")
if original_arn and (arn != original_arn):
# I'm not sure whether the API guarantees that the ARN will not change
# I'm failing just in case.
# If I'm wrong, I'll catch it in the integration tests.
module.fail_json(msg="ARN changed with ACM update, from %s to %s" % (original_arn, arn))
# tag that cert
try:
self.tag_certificate_with_backoff(client, arn, tags)
except (BotoCoreError, ClientError) as e:
module.debug("Attempting to delete the cert we just created, arn=%s" % arn)
try:
self.delete_certificate_with_backoff(client, arn)
except Exception as f:
module.warn("Certificate %s exists, and is not tagged. So Ansible will not see it on the next run.")
module.fail_json_aws(e, msg="Couldn't tag certificate %s, couldn't delete it either" % arn)
module.fail_json_aws(e, msg="Couldn't tag certificate %s" % arn)
return arn
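
For reference, a minimal sketch of how a module might drive this helper; the argument spec, tag filter, and exit handling below are illustrative, not part of the original file:

from ansible.module_utils.aws.core import AnsibleAWSModule

module = AnsibleAWSModule(argument_spec=dict(domain_name=dict(type='str')))
client = module.client('acm')
acm = ACMServiceManager(module)

# Fetch ISSUED certificates for one domain, keeping only those tagged Env=prod
certificates = acm.get_certificates(client, module,
                                    domain_name=module.params.get('domain_name'),
                                    statuses=['ISSUED'],
                                    only_tags={'Env': 'prod'})
module.exit_json(changed=False, certificates=certificates)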

--- a/lib/ansible/module_utils/aws/batch.py
+++ /dev/null
@@ -1,103 +0,0 @@
# Copyright (c) 2017 Ansible Project
#
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""
This module adds shared support for Batch modules.
"""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils.ec2 import get_aws_connection_info, boto3_conn, snake_dict_to_camel_dict
try:
from botocore.exceptions import ClientError
except ImportError:
pass # Handled by HAS_BOTO3
class AWSConnection(object):
"""
Create the connection object and client objects as required.
"""
def __init__(self, ansible_obj, resources, boto3=True):
ansible_obj.deprecate("The 'ansible.module_utils.aws.batch.AWSConnection' class is deprecated, please use 'AnsibleAWSModule.client()'",
version='2.14')
self.region, self.endpoint, aws_connect_kwargs = get_aws_connection_info(ansible_obj, boto3=boto3)
self.resource_client = dict()
if not resources:
resources = ['batch']
resources.append('iam')
for resource in resources:
aws_connect_kwargs.update(dict(region=self.region,
endpoint=self.endpoint,
conn_type='client',
resource=resource
))
self.resource_client[resource] = boto3_conn(ansible_obj, **aws_connect_kwargs)
# if region is not provided, then get default profile/session region
if not self.region:
self.region = self.resource_client['batch'].meta.region_name
# set account ID
try:
self.account_id = self.resource_client['iam'].get_user()['User']['Arn'].split(':')[4]
except (ClientError, ValueError, KeyError, IndexError):
self.account_id = ''
def client(self, resource='batch'):
return self.resource_client[resource]
def cc(key):
"""
Changes a Python snake_case key into its camelCase equivalent. For example, 'compute_environment_name' becomes
'computeEnvironmentName'.
:param key:
:return:
"""
components = key.split('_')
return components[0] + "".join([token.capitalize() for token in components[1:]])
def set_api_params(module, module_params):
"""
Sets module parameters to those expected by the boto3 API.
:param module:
:param module_params:
:return:
"""
api_params = dict((k, v) for k, v in dict(module.params).items() if k in module_params and v is not None)
return snake_dict_to_camel_dict(api_params)
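
To make these two helpers concrete, a small hedged example; the parameter names and values are hypothetical:

cc('compute_environment_name')   # -> 'computeEnvironmentName'

# Given module.params == {'compute_environment_name': 'my-env', 'state': 'ENABLED', 'foo': None},
# keep only the whitelisted, non-None params and camelize the keys for boto3:
set_api_params(module, ('compute_environment_name', 'state'))
# -> {'computeEnvironmentName': 'my-env', 'state': 'ENABLED'}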

--- a/lib/ansible/module_utils/aws/cloudfront_facts.py
+++ /dev/null
@@ -1,235 +0,0 @@
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 Willem van Ketwich
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
#
# Author:
# - Willem van Ketwich <willem@vanketwich.com.au>
#
# Common functionality to be used by the modules:
# - cloudfront_distribution
# - cloudfront_invalidation
# - cloudfront_origin_access_identity
"""
Common CloudFront facts shared between modules
"""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils.ec2 import get_aws_connection_info, boto3_conn
from ansible.module_utils.ec2 import boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict
try:
import botocore
except ImportError:
pass
class CloudFrontFactsServiceManager(object):
"""Handles CloudFront Facts Services"""
def __init__(self, module):
self.module = module
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
self.client = boto3_conn(module, conn_type='client',
resource='cloudfront', region=region,
endpoint=ec2_url, **aws_connect_kwargs)
def get_distribution(self, distribution_id):
try:
return self.client.get_distribution(Id=distribution_id)
except botocore.exceptions.ClientError as e:
self.module.fail_json_aws(e, msg="Error describing distribution")
def get_distribution_config(self, distribution_id):
try:
return self.client.get_distribution_config(Id=distribution_id)
except botocore.exceptions.ClientError as e:
self.module.fail_json_aws(e, msg="Error describing distribution configuration")
def get_origin_access_identity(self, origin_access_identity_id):
try:
return self.client.get_cloud_front_origin_access_identity(Id=origin_access_identity_id)
except botocore.exceptions.ClientError as e:
self.module.fail_json_aws(e, msg="Error describing origin access identity")
def get_origin_access_identity_config(self, origin_access_identity_id):
try:
return self.client.get_cloud_front_origin_access_identity_config(Id=origin_access_identity_id)
except botocore.exceptions.ClientError as e:
self.module.fail_json_aws(e, msg="Error describing origin access identity configuration")
def get_invalidation(self, distribution_id, invalidation_id):
try:
return self.client.get_invalidation(DistributionId=distribution_id, Id=invalidation_id)
except botocore.exceptions.ClientError as e:
self.module.fail_json_aws(e, msg="Error describing invalidation")
def get_streaming_distribution(self, distribution_id):
try:
return self.client.get_streaming_distribution(Id=distribution_id)
except botocore.exceptions.ClientError as e:
self.module.fail_json_aws(e, msg="Error describing streaming distribution")
def get_streaming_distribution_config(self, distribution_id):
try:
return self.client.get_streaming_distribution_config(Id=distribution_id)
except botocore.exceptions.ClientError as e:
self.module.fail_json_aws(e, msg="Error describing streaming distribution")
def list_origin_access_identities(self):
try:
paginator = self.client.get_paginator('list_cloud_front_origin_access_identities')
result = paginator.paginate().build_full_result().get('CloudFrontOriginAccessIdentityList', {})
return result.get('Items', [])
except botocore.exceptions.ClientError as e:
self.module.fail_json_aws(e, msg="Error listing cloud front origin access identities")
def list_distributions(self, keyed=True):
try:
paginator = self.client.get_paginator('list_distributions')
result = paginator.paginate().build_full_result().get('DistributionList', {})
distribution_list = result.get('Items', [])
if not keyed:
return distribution_list
return self.keyed_list_helper(distribution_list)
except botocore.exceptions.ClientError as e:
self.module.fail_json_aws(e, msg="Error listing distributions")
def list_distributions_by_web_acl_id(self, web_acl_id):
try:
result = self.client.list_distributions_by_web_acl_id(WebAclId=web_acl_id)
distribution_list = result.get('DistributionList', {}).get('Items', [])
return self.keyed_list_helper(distribution_list)
except botocore.exceptions.ClientError as e:
self.module.fail_json_aws(e, msg="Error listing distributions by web acl id")
def list_invalidations(self, distribution_id):
try:
paginator = self.client.get_paginator('list_invalidations')
result = paginator.paginate(DistributionId=distribution_id).build_full_result()
return result.get('InvalidationList', {}).get('Items', [])
except botocore.exceptions.ClientError as e:
self.module.fail_json_aws(e, msg="Error listing invalidations")
def list_streaming_distributions(self, keyed=True):
try:
paginator = self.client.get_paginator('list_streaming_distributions')
result = paginator.paginate().build_full_result()
streaming_distribution_list = result.get('StreamingDistributionList', {}).get('Items', [])
if not keyed:
return streaming_distribution_list
return self.keyed_list_helper(streaming_distribution_list)
except botocore.exceptions.ClientError as e:
self.module.fail_json_aws(e, msg="Error listing streaming distributions")
def summary(self):
summary_dict = {}
summary_dict.update(self.summary_get_distribution_list(False))
summary_dict.update(self.summary_get_distribution_list(True))
summary_dict.update(self.summary_get_origin_access_identity_list())
return summary_dict
def summary_get_origin_access_identity_list(self):
try:
origin_access_identity_list = {'origin_access_identities': []}
origin_access_identities = self.list_origin_access_identities()
for origin_access_identity in origin_access_identities:
oai_id = origin_access_identity['Id']
oai_full_response = self.get_origin_access_identity(oai_id)
oai_summary = {'Id': oai_id, 'ETag': oai_full_response['ETag']}
origin_access_identity_list['origin_access_identities'].append(oai_summary)
return origin_access_identity_list
except botocore.exceptions.ClientError as e:
self.module.fail_json_aws(e, msg="Error generating summary of origin access identities")
def summary_get_distribution_list(self, streaming=False):
try:
list_name = 'streaming_distributions' if streaming else 'distributions'
key_list = ['Id', 'ARN', 'Status', 'LastModifiedTime', 'DomainName', 'Comment', 'PriceClass', 'Enabled']
distribution_list = {list_name: []}
distributions = self.list_streaming_distributions(False) if streaming else self.list_distributions(False)
for dist in distributions:
temp_distribution = {}
for key_name in key_list:
temp_distribution[key_name] = dist[key_name]
temp_distribution['Aliases'] = list(dist['Aliases'].get('Items', []))
temp_distribution['ETag'] = self.get_etag_from_distribution_id(dist['Id'], streaming)
if not streaming:
temp_distribution['WebACLId'] = dist['WebACLId']
invalidation_ids = self.get_list_of_invalidation_ids_from_distribution_id(dist['Id'])
if invalidation_ids:
temp_distribution['Invalidations'] = invalidation_ids
resource_tags = self.client.list_tags_for_resource(Resource=dist['ARN'])
temp_distribution['Tags'] = boto3_tag_list_to_ansible_dict(resource_tags['Tags'].get('Items', []))
distribution_list[list_name].append(temp_distribution)
return distribution_list
except botocore.exceptions.ClientError as e:
self.module.fail_json_aws(e, msg="Error generating summary of distributions")
except Exception as e:
self.module.fail_json_aws(e, msg="Error generating summary of distributions")
def get_etag_from_distribution_id(self, distribution_id, streaming):
distribution = {}
if not streaming:
distribution = self.get_distribution(distribution_id)
else:
distribution = self.get_streaming_distribution(distribution_id)
return distribution['ETag']
def get_list_of_invalidation_ids_from_distribution_id(self, distribution_id):
try:
invalidation_ids = []
invalidations = self.list_invalidations(distribution_id)
for invalidation in invalidations:
invalidation_ids.append(invalidation['Id'])
return invalidation_ids
except botocore.exceptions.ClientError as e:
self.module.fail_json_aws(e, msg="Error getting list of invalidation ids")
def get_distribution_id_from_domain_name(self, domain_name):
try:
distribution_id = ""
distributions = self.list_distributions(False)
distributions += self.list_streaming_distributions(False)
for dist in distributions:
if 'Items' in dist['Aliases']:
for alias in dist['Aliases']['Items']:
if str(alias).lower() == domain_name.lower():
distribution_id = dist['Id']
break
return distribution_id
except botocore.exceptions.ClientError as e:
self.module.fail_json_aws(e, msg="Error getting distribution id from domain name")
def get_aliases_from_distribution_id(self, distribution_id):
try:
distribution = self.get_distribution(distribution_id)
return distribution['DistributionConfig']['Aliases'].get('Items', [])
except botocore.exceptions.ClientError as e:
self.module.fail_json_aws(e, msg="Error getting list of aliases from distribution_id")
def keyed_list_helper(self, list_to_key):
keyed_list = dict()
for item in list_to_key:
distribution_id = item['Id']
if 'Items' in item['Aliases']:
aliases = item['Aliases']['Items']
for alias in aliases:
keyed_list.update({alias: item})
keyed_list.update({distribution_id: item})
return keyed_list
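
A brief sketch of consuming this manager from a facts module; the module wiring and domain name are illustrative:

service_mgr = CloudFrontFactsServiceManager(module)
facts = service_mgr.summary()   # 'distributions', 'streaming_distributions', 'origin_access_identities'
distribution_id = service_mgr.get_distribution_id_from_domain_name('cdn.example.com')
if distribution_id:
    etag = service_mgr.get_etag_from_distribution_id(distribution_id, False)
module.exit_json(changed=False, cloudfront=facts)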

--- a/lib/ansible/module_utils/aws/core.py
+++ /dev/null
@@ -1,335 +0,0 @@
#
# Copyright 2017 Michael De La Rue | Ansible
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""This module adds shared support for generic Amazon AWS modules
**This code is not yet ready for use in user modules. As of 2017**
**and through to 2018, the interface is likely to change**
**aggressively as the exact correct interface for ansible AWS modules**
**is identified. In particular, until this notice goes away or is**
**changed, methods may disappear from the interface. Please don't**
**publish modules using this except directly to the main Ansible**
**development repository.**
In order to use this module, include it as part of a custom
module as shown below.
from ansible.module_utils.aws.core import AnsibleAWSModule
module = AnsibleAWSModule(argument_spec=dictionary, supports_check_mode=boolean,
mutually_exclusive=list1, required_together=list2)
The 'AnsibleAWSModule' module provides similar, but more restricted,
interfaces to the normal Ansible module. It also includes the
additional methods for connecting to AWS using the standard module arguments
m.resource('lambda') # - get an AWS connection as a boto3 resource.
or
m.client('sts') # - get an AWS connection as a boto3 client.
To make use of AWSRetry easier, it can now be wrapped around any call from a
module-created client. To add retries to a client, create a client:
m.client('ec2', retry_decorator=AWSRetry.jittered_backoff(retries=10))
Any calls from that client can be made to use the decorator passed at call-time
using the `aws_retry` argument. By default, no retries are used.
ec2 = m.client('ec2', retry_decorator=AWSRetry.jittered_backoff(retries=10))
ec2.describe_instances(InstanceIds=['i-123456789'], aws_retry=True)
The call will be retried the specified number of times, so the calling functions
don't need to be wrapped in the backoff decorator.
"""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import logging
import traceback
from functools import wraps
from distutils.version import LooseVersion
try:
from cStringIO import StringIO
except ImportError:
# Python 3
from io import StringIO
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native
from ansible.module_utils.ec2 import HAS_BOTO3, camel_dict_to_snake_dict, ec2_argument_spec, boto3_conn
from ansible.module_utils.ec2 import get_aws_connection_info, get_aws_region
# We will also export HAS_BOTO3 so end user modules can use it.
__all__ = ('AnsibleAWSModule', 'HAS_BOTO3', 'is_boto3_error_code')
class AnsibleAWSModule(object):
"""An ansible module class for AWS modules
AnsibleAWSModule provides a class for building modules which
connect to Amazon Web Services. The interface is currently more
restricted than the basic module class with the aim that later the
basic module class can be reduced. If you find that any key
feature is missing please contact the author/Ansible AWS team
(available on #ansible-aws on IRC) to request the additional
features needed.
"""
default_settings = {
"default_args": True,
"check_boto3": True,
"auto_retry": True,
"module_class": AnsibleModule
}
def __init__(self, **kwargs):
local_settings = {}
for key in AnsibleAWSModule.default_settings:
try:
local_settings[key] = kwargs.pop(key)
except KeyError:
local_settings[key] = AnsibleAWSModule.default_settings[key]
self.settings = local_settings
if local_settings["default_args"]:
# ec2_argument_spec contains the region so we use that; there's a patch coming which
# will add it to aws_argument_spec so if that's accepted then later we should change
# over
argument_spec_full = ec2_argument_spec()
try:
argument_spec_full.update(kwargs["argument_spec"])
except (TypeError, NameError):
pass
kwargs["argument_spec"] = argument_spec_full
self._module = AnsibleAWSModule.default_settings["module_class"](**kwargs)
if local_settings["check_boto3"] and not HAS_BOTO3:
self._module.fail_json(
msg=missing_required_lib('botocore or boto3'))
self.check_mode = self._module.check_mode
self._diff = self._module._diff
self._name = self._module._name
self._botocore_endpoint_log_stream = StringIO()
self.logger = None
if self.params.get('debug_botocore_endpoint_logs'):
self.logger = logging.getLogger('botocore.endpoint')
self.logger.setLevel(logging.DEBUG)
self.logger.addHandler(logging.StreamHandler(self._botocore_endpoint_log_stream))
@property
def params(self):
return self._module.params
def _get_resource_action_list(self):
actions = []
for ln in self._botocore_endpoint_log_stream.getvalue().split('\n'):
ln = ln.strip()
if not ln:
continue
found_operational_request = re.search(r"OperationModel\(name=.*?\)", ln)
if found_operational_request:
operation_request = found_operational_request.group(0)[20:-1]
resource = re.search(r"https://.*?\.", ln).group(0)[8:-1]
actions.append("{0}:{1}".format(resource, operation_request))
return list(set(actions))
def exit_json(self, *args, **kwargs):
if self.params.get('debug_botocore_endpoint_logs'):
kwargs['resource_actions'] = self._get_resource_action_list()
return self._module.exit_json(*args, **kwargs)
def fail_json(self, *args, **kwargs):
if self.params.get('debug_botocore_endpoint_logs'):
kwargs['resource_actions'] = self._get_resource_action_list()
return self._module.fail_json(*args, **kwargs)
def debug(self, *args, **kwargs):
return self._module.debug(*args, **kwargs)
def warn(self, *args, **kwargs):
return self._module.warn(*args, **kwargs)
def deprecate(self, *args, **kwargs):
return self._module.deprecate(*args, **kwargs)
def boolean(self, *args, **kwargs):
return self._module.boolean(*args, **kwargs)
def md5(self, *args, **kwargs):
return self._module.md5(*args, **kwargs)
def client(self, service, retry_decorator=None):
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(self, boto3=True)
conn = boto3_conn(self, conn_type='client', resource=service,
region=region, endpoint=ec2_url, **aws_connect_kwargs)
return conn if retry_decorator is None else _RetryingBotoClientWrapper(conn, retry_decorator)
def resource(self, service):
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(self, boto3=True)
return boto3_conn(self, conn_type='resource', resource=service,
region=region, endpoint=ec2_url, **aws_connect_kwargs)
@property
def region(self, boto3=True):
return get_aws_region(self, boto3)
def fail_json_aws(self, exception, msg=None):
"""call fail_json with processed exception
function for converting exceptions thrown by AWS SDK modules,
botocore, boto3 and boto, into nice error messages.
"""
last_traceback = traceback.format_exc()
# to_native is trusted to handle exceptions that str() could
# convert to text.
try:
except_msg = to_native(exception.message)
except AttributeError:
except_msg = to_native(exception)
if msg is not None:
message = '{0}: {1}'.format(msg, except_msg)
else:
message = except_msg
try:
response = exception.response
except AttributeError:
response = None
failure = dict(
msg=message,
exception=last_traceback,
**self._gather_versions()
)
if response is not None:
failure.update(**camel_dict_to_snake_dict(response))
self.fail_json(**failure)
def _gather_versions(self):
"""Gather AWS SDK (boto3 and botocore) dependency versions
Returns {'boto3_version': str, 'botocore_version': str}
Returns {} if neither are installed
"""
if not HAS_BOTO3:
return {}
import boto3
import botocore
return dict(boto3_version=boto3.__version__,
botocore_version=botocore.__version__)
def boto3_at_least(self, desired):
"""Check if the available boto3 version is greater than or equal to a desired version.
Usage:
if module.params.get('assign_ipv6_address') and not module.boto3_at_least('1.4.4'):
# conditionally fail on old boto3 versions if a specific feature is not supported
module.fail_json(msg="Boto3 can't deal with EC2 IPv6 addresses before version 1.4.4.")
"""
existing = self._gather_versions()
return LooseVersion(existing['boto3_version']) >= LooseVersion(desired)
def botocore_at_least(self, desired):
"""Check if the available botocore version is greater than or equal to a desired version.
Usage:
if not module.botocore_at_least('1.2.3'):
module.fail_json(msg='The Serverless Elastic Load Compute Service is not in botocore before v1.2.3')
if not module.botocore_at_least('1.5.3'):
module.warn('Botocore did not include waiters for Service X before 1.5.3. '
'To wait until Service X resources are fully available, update botocore.')
"""
existing = self._gather_versions()
return LooseVersion(existing['botocore_version']) >= LooseVersion(desired)
class _RetryingBotoClientWrapper(object):
__never_wait = (
'get_paginator', 'can_paginate',
'get_waiter', 'generate_presigned_url',
)
def __init__(self, client, retry):
self.client = client
self.retry = retry
def _create_optional_retry_wrapper_function(self, unwrapped):
retrying_wrapper = self.retry(unwrapped)
@wraps(unwrapped)
def deciding_wrapper(aws_retry=False, *args, **kwargs):
if aws_retry:
return retrying_wrapper(*args, **kwargs)
else:
return unwrapped(*args, **kwargs)
return deciding_wrapper
def __getattr__(self, name):
unwrapped = getattr(self.client, name)
if name in self.__never_wait:
return unwrapped
elif callable(unwrapped):
wrapped = self._create_optional_retry_wrapper_function(unwrapped)
setattr(self, name, wrapped)
return wrapped
else:
return unwrapped
def is_boto3_error_code(code, e=None):
"""Check if the botocore exception is raised by a specific error code.
Returns ClientError if the error code matches; otherwise (no error code present, or a different code) returns a dummy exception class that will never be raised
Example:
try:
ec2.describe_instances(InstanceIds=['potato'])
except is_boto3_error_code('InvalidInstanceID.Malformed'):
pass  # handle the error for that code case
except botocore.exceptions.ClientError as e:
pass  # handle the generic error case for all other codes
"""
from botocore.exceptions import ClientError
if e is None:
import sys
dummy, e, dummy = sys.exc_info()
if isinstance(e, ClientError) and e.response['Error']['Code'] == code:
return ClientError
return type('NeverEverRaisedException', (Exception,), {})
def get_boto3_client_method_parameters(client, method_name, required=False):
op = client.meta.method_to_api_mapping.get(method_name)
input_shape = client._service_model.operation_model(op).input_shape
if not input_shape:
parameters = []
elif required:
parameters = list(input_shape.required_members)
else:
parameters = list(input_shape.members.keys())
return parameters
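
Putting the pieces together, a hedged sketch of the calling pattern described in the module docstring; note that AWSRetry lives in ansible.module_utils.ec2 and is not imported by this file, and the instance ID is illustrative:

import botocore.exceptions
from ansible.module_utils.ec2 import AWSRetry

module = AnsibleAWSModule(argument_spec=dict(instance_id=dict(type='str')))
ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff(retries=10))
try:
    # aws_retry=True routes this call through the jittered-backoff decorator
    instances = ec2.describe_instances(InstanceIds=[module.params['instance_id']], aws_retry=True)
except is_boto3_error_code('InvalidInstanceID.NotFound'):
    instances = None  # a missing instance is treated as absent rather than a failure
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
    module.fail_json_aws(e, msg="Couldn't describe instance")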

--- a/lib/ansible/module_utils/aws/direct_connect.py
+++ /dev/null
@@ -1,87 +0,0 @@
# Copyright (c) 2017 Ansible Project
#
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""
This module adds shared support for Direct Connect modules.
"""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import traceback
try:
import botocore
except ImportError:
pass
from ansible.module_utils.ec2 import camel_dict_to_snake_dict
class DirectConnectError(Exception):
def __init__(self, msg, last_traceback=None, exception=None):
self.msg = msg
self.last_traceback = last_traceback
self.exception = exception
def delete_connection(client, connection_id):
try:
client.delete_connection(connectionId=connection_id)
except botocore.exceptions.ClientError as e:
raise DirectConnectError(msg="Failed to delete DirectConnection {0}.".format(connection_id),
last_traceback=traceback.format_exc(),
exception=e)
def associate_connection_and_lag(client, connection_id, lag_id):
try:
client.associate_connection_with_lag(connectionId=connection_id,
lagId=lag_id)
except botocore.exceptions.ClientError as e:
raise DirectConnectError(msg="Failed to associate Direct Connect connection {0}"
" with link aggregation group {1}.".format(connection_id, lag_id),
last_traceback=traceback.format_exc(),
exception=e)
def disassociate_connection_and_lag(client, connection_id, lag_id):
try:
client.disassociate_connection_from_lag(connectionId=connection_id,
lagId=lag_id)
except botocore.exceptions.ClientError as e:
raise DirectConnectError(msg="Failed to disassociate Direct Connect connection {0}"
" from link aggregation group {1}.".format(connection_id, lag_id),
last_traceback=traceback.format_exc(),
exception=e)
def delete_virtual_interface(client, virtual_interface):
try:
client.delete_virtual_interface(virtualInterfaceId=virtual_interface)
except botocore.exceptions.ClientError as e:
raise DirectConnectError(msg="Could not delete virtual interface {0}".format(virtual_interface),
last_traceback=traceback.format_exc(),
exception=e)
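
Because these helpers raise instead of failing the module directly, a calling module typically converts DirectConnectError back into a module failure; a sketch, with illustrative IDs:

try:
    disassociate_connection_and_lag(client, connection_id='dxcon-abc123', lag_id='dxlag-def456')
except DirectConnectError as e:
    if e.exception:
        module.fail_json_aws(e.exception, msg=e.msg)
    else:
        module.fail_json(msg=e.msg, exception=e.last_traceback)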

--- a/lib/ansible/module_utils/aws/elb_utils.py
+++ /dev/null
@@ -1,112 +0,0 @@
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils.ec2 import AWSRetry
# Non-ansible imports
try:
from botocore.exceptions import BotoCoreError, ClientError
except ImportError:
pass
def get_elb(connection, module, elb_name):
"""
Get an ELB based on name. If not found, return None.
:param connection: AWS boto3 elbv2 connection
:param module: Ansible module
:param elb_name: Name of load balancer to get
:return: boto3 ELB dict or None if not found
"""
try:
return _get_elb(connection, module, elb_name)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e)
@AWSRetry.jittered_backoff()
def _get_elb(connection, module, elb_name):
"""
Get an ELB based on name using AWSRetry. If not found, return None.
:param connection: AWS boto3 elbv2 connection
:param module: Ansible module
:param elb_name: Name of load balancer to get
:return: boto3 ELB dict or None if not found
"""
try:
load_balancer_paginator = connection.get_paginator('describe_load_balancers')
return (load_balancer_paginator.paginate(Names=[elb_name]).build_full_result())['LoadBalancers'][0]
except ClientError as e:  # a BotoCoreError has no response attribute; let the caller handle it
if e.response['Error']['Code'] == 'LoadBalancerNotFound':
return None
else:
raise e
def get_elb_listener(connection, module, elb_arn, listener_port):
"""
Get an ELB listener based on the port provided. If not found, return None.
:param connection: AWS boto3 elbv2 connection
:param module: Ansible module
:param elb_arn: ARN of the ELB to look at
:param listener_port: Port of the listener to look for
:return: boto3 ELB listener dict or None if not found
"""
try:
listener_paginator = connection.get_paginator('describe_listeners')
listeners = (AWSRetry.jittered_backoff()(listener_paginator.paginate)(LoadBalancerArn=elb_arn).build_full_result())['Listeners']
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e)
matched_listener = None
for listener in listeners:
if listener['Port'] == listener_port:
matched_listener = listener
break
return matched_listener
def get_elb_listener_rules(connection, module, listener_arn):
"""
Get rules for a particular ELB listener using the listener ARN.
:param connection: AWS boto3 elbv2 connection
:param module: Ansible module
:param listener_arn: ARN of the ELB listener
:return: boto3 ELB rules list
"""
try:
return AWSRetry.jittered_backoff()(connection.describe_rules)(ListenerArn=listener_arn)['Rules']
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e)
def convert_tg_name_to_arn(connection, module, tg_name):
"""
Get ARN of a target group using the target group's name
:param connection: AWS boto3 elbv2 connection
:param module: Ansible module
:param tg_name: Name of the target group
:return: target group ARN string
"""
try:
response = AWSRetry.jittered_backoff()(connection.describe_target_groups)(Names=[tg_name])
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e)
tg_arn = response['TargetGroups'][0]['TargetGroupArn']
return tg_arn
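
A short sketch of how these lookups compose; the load balancer name and port are illustrative:

elb = get_elb(connection, module, 'my-load-balancer')
if elb is None:
    module.fail_json(msg="Load balancer 'my-load-balancer' not found")
listener = get_elb_listener(connection, module, elb['LoadBalancerArn'], 443)
if listener is not None:
    rules = get_elb_listener_rules(connection, module, listener['ListenerArn'])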

--- a/lib/ansible/module_utils/aws/elbv2.py
+++ /dev/null
@@ -1,891 +0,0 @@
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
# Ansible imports
from ansible.module_utils.ec2 import camel_dict_to_snake_dict, get_ec2_security_group_ids_from_names, \
ansible_dict_to_boto3_tag_list, boto3_tag_list_to_ansible_dict, compare_policies as compare_dicts, \
AWSRetry
from ansible.module_utils.aws.elb_utils import get_elb, get_elb_listener, convert_tg_name_to_arn
# Non-ansible imports
try:
from botocore.exceptions import BotoCoreError, ClientError
except ImportError:
pass
import traceback
from copy import deepcopy
class ElasticLoadBalancerV2(object):
def __init__(self, connection, module):
self.connection = connection
self.module = module
self.changed = False
self.new_load_balancer = False
self.scheme = module.params.get("scheme")
self.name = module.params.get("name")
self.subnet_mappings = module.params.get("subnet_mappings")
self.subnets = module.params.get("subnets")
self.deletion_protection = module.params.get("deletion_protection")
self.wait = module.params.get("wait")
if module.params.get("tags") is not None:
self.tags = ansible_dict_to_boto3_tag_list(module.params.get("tags"))
else:
self.tags = None
self.purge_tags = module.params.get("purge_tags")
self.elb = get_elb(connection, module, self.name)
if self.elb is not None:
self.elb_attributes = self.get_elb_attributes()
self.elb['tags'] = self.get_elb_tags()
else:
self.elb_attributes = None
def wait_for_status(self, elb_arn):
"""
Wait for load balancer to reach 'active' status
:param elb_arn: The load balancer ARN
:return:
"""
try:
waiter = self.connection.get_waiter('load_balancer_available')
waiter.wait(LoadBalancerArns=[elb_arn])
except (BotoCoreError, ClientError) as e:
self.module.fail_json_aws(e)
def get_elb_attributes(self):
"""
Get load balancer attributes
:return:
"""
try:
attr_list = AWSRetry.jittered_backoff()(
self.connection.describe_load_balancer_attributes
)(LoadBalancerArn=self.elb['LoadBalancerArn'])['Attributes']
elb_attributes = boto3_tag_list_to_ansible_dict(attr_list)
except (BotoCoreError, ClientError) as e:
self.module.fail_json_aws(e)
# Replace '.' with '_' in attribute key names to make it more Ansibley
return dict((k.replace('.', '_'), v) for k, v in elb_attributes.items())
def update_elb_attributes(self):
"""
Update the elb_attributes parameter
:return:
"""
self.elb_attributes = self.get_elb_attributes()
def get_elb_tags(self):
"""
Get load balancer tags
:return:
"""
try:
return AWSRetry.jittered_backoff()(
self.connection.describe_tags
)(ResourceArns=[self.elb['LoadBalancerArn']])['TagDescriptions'][0]['Tags']
except (BotoCoreError, ClientError) as e:
self.module.fail_json_aws(e)
def delete_tags(self, tags_to_delete):
"""
Delete elb tags
:return:
"""
try:
AWSRetry.jittered_backoff()(
self.connection.remove_tags
)(ResourceArns=[self.elb['LoadBalancerArn']], TagKeys=tags_to_delete)
except (BotoCoreError, ClientError) as e:
self.module.fail_json_aws(e)
self.changed = True
def modify_tags(self):
"""
Modify elb tags
:return:
"""
try:
AWSRetry.jittered_backoff()(
self.connection.add_tags
)(ResourceArns=[self.elb['LoadBalancerArn']], Tags=self.tags)
except (BotoCoreError, ClientError) as e:
self.module.fail_json_aws(e)
self.changed = True
def delete(self):
"""
Delete elb
:return:
"""
try:
AWSRetry.jittered_backoff()(
self.connection.delete_load_balancer
)(LoadBalancerArn=self.elb['LoadBalancerArn'])
except (BotoCoreError, ClientError) as e:
self.module.fail_json_aws(e)
self.changed = True
def compare_subnets(self):
"""
Compare user subnets with current ELB subnets
:return: bool True if they match otherwise False
"""
subnet_mapping_id_list = []
subnet_mappings = []
# Check if we're dealing with subnets or subnet_mappings
if self.subnets is not None:
# Convert subnets to subnet_mappings format for comparison
for subnet in self.subnets:
subnet_mappings.append({'SubnetId': subnet})
if self.subnet_mappings is not None:
# Use this directly since we're comparing as a mapping
subnet_mappings = self.subnet_mappings
# Build a subnet_mapping style structure of what's currently
# on the load balancer
for subnet in self.elb['AvailabilityZones']:
this_mapping = {'SubnetId': subnet['SubnetId']}
for address in subnet.get('LoadBalancerAddresses', []):
if 'AllocationId' in address:
this_mapping['AllocationId'] = address['AllocationId']
break
subnet_mapping_id_list.append(this_mapping)
return set(frozenset(mapping.items()) for mapping in subnet_mapping_id_list) == set(frozenset(mapping.items()) for mapping in subnet_mappings)
def modify_subnets(self):
"""
Modify elb subnets to match module parameters
:return:
"""
try:
AWSRetry.jittered_backoff()(
self.connection.set_subnets
)(LoadBalancerArn=self.elb['LoadBalancerArn'], Subnets=self.subnets)
except (BotoCoreError, ClientError) as e:
self.module.fail_json_aws(e)
self.changed = True
def update(self):
"""
Update the elb from AWS
:return:
"""
self.elb = get_elb(self.connection, self.module, self.module.params.get("name"))
self.elb['tags'] = self.get_elb_tags()
class ApplicationLoadBalancer(ElasticLoadBalancerV2):
def __init__(self, connection, connection_ec2, module):
"""
:param connection: boto3 connection
:param module: Ansible module
"""
super(ApplicationLoadBalancer, self).__init__(connection, module)
self.connection_ec2 = connection_ec2
# Ansible module parameters specific to ALBs
self.type = 'application'
if module.params.get('security_groups') is not None:
try:
self.security_groups = AWSRetry.jittered_backoff()(
get_ec2_security_group_ids_from_names
)(module.params.get('security_groups'), self.connection_ec2, boto3=True)
except ValueError as e:
self.module.fail_json(msg=str(e), exception=traceback.format_exc())
except (BotoCoreError, ClientError) as e:
self.module.fail_json_aws(e)
else:
self.security_groups = module.params.get('security_groups')
self.access_logs_enabled = module.params.get("access_logs_enabled")
self.access_logs_s3_bucket = module.params.get("access_logs_s3_bucket")
self.access_logs_s3_prefix = module.params.get("access_logs_s3_prefix")
self.idle_timeout = module.params.get("idle_timeout")
self.http2 = module.params.get("http2")
if self.elb is not None and self.elb['Type'] != 'application':
self.module.fail_json(msg="The load balancer type you are trying to manage is not application. Try elb_network_lb module instead.")
def create_elb(self):
"""
Create a load balancer
:return:
"""
# Required parameters
params = dict()
params['Name'] = self.name
params['Type'] = self.type
# Other parameters
if self.subnets is not None:
params['Subnets'] = self.subnets
if self.subnet_mappings is not None:
params['SubnetMappings'] = self.subnet_mappings
if self.security_groups is not None:
params['SecurityGroups'] = self.security_groups
params['Scheme'] = self.scheme
if self.tags:
params['Tags'] = self.tags
try:
self.elb = AWSRetry.jittered_backoff()(self.connection.create_load_balancer)(**params)['LoadBalancers'][0]
self.changed = True
self.new_load_balancer = True
except (BotoCoreError, ClientError) as e:
self.module.fail_json_aws(e)
if self.wait:
self.wait_for_status(self.elb['LoadBalancerArn'])
def modify_elb_attributes(self):
"""
Update Application ELB attributes if required
:return:
"""
update_attributes = []
if self.access_logs_enabled is not None and str(self.access_logs_enabled).lower() != self.elb_attributes['access_logs_s3_enabled']:
update_attributes.append({'Key': 'access_logs.s3.enabled', 'Value': str(self.access_logs_enabled).lower()})
if self.access_logs_s3_bucket is not None and self.access_logs_s3_bucket != self.elb_attributes['access_logs_s3_bucket']:
update_attributes.append({'Key': 'access_logs.s3.bucket', 'Value': self.access_logs_s3_bucket})
if self.access_logs_s3_prefix is not None and self.access_logs_s3_prefix != self.elb_attributes['access_logs_s3_prefix']:
update_attributes.append({'Key': 'access_logs.s3.prefix', 'Value': self.access_logs_s3_prefix})
if self.deletion_protection is not None and str(self.deletion_protection).lower() != self.elb_attributes['deletion_protection_enabled']:
update_attributes.append({'Key': 'deletion_protection.enabled', 'Value': str(self.deletion_protection).lower()})
if self.idle_timeout is not None and str(self.idle_timeout) != self.elb_attributes['idle_timeout_timeout_seconds']:
update_attributes.append({'Key': 'idle_timeout.timeout_seconds', 'Value': str(self.idle_timeout)})
if self.http2 is not None and str(self.http2).lower() != self.elb_attributes['routing_http2_enabled']:
update_attributes.append({'Key': 'routing.http2.enabled', 'Value': str(self.http2).lower()})
if update_attributes:
try:
AWSRetry.jittered_backoff()(
self.connection.modify_load_balancer_attributes
)(LoadBalancerArn=self.elb['LoadBalancerArn'], Attributes=update_attributes)
self.changed = True
except (BotoCoreError, ClientError) as e:
# Something went wrong setting attributes. If this ELB was created during this task, delete it to leave a consistent state
if self.new_load_balancer:
AWSRetry.jittered_backoff()(self.connection.delete_load_balancer)(LoadBalancerArn=self.elb['LoadBalancerArn'])
self.module.fail_json_aws(e)
def compare_security_groups(self):
"""
Compare user security groups with current ELB security groups
:return: bool True if they match otherwise False
"""
return set(self.elb['SecurityGroups']) == set(self.security_groups)
def modify_security_groups(self):
"""
Modify elb security groups to match module parameters
:return:
"""
try:
AWSRetry.jittered_backoff()(
self.connection.set_security_groups
)(LoadBalancerArn=self.elb['LoadBalancerArn'], SecurityGroups=self.security_groups)
except (BotoCoreError, ClientError) as e:
self.module.fail_json_aws(e)
self.changed = True
class NetworkLoadBalancer(ElasticLoadBalancerV2):
def __init__(self, connection, connection_ec2, module):
"""
:param connection: boto3 connection
:param module: Ansible module
"""
super(NetworkLoadBalancer, self).__init__(connection, module)
self.connection_ec2 = connection_ec2
# Ansible module parameters specific to NLBs
self.type = 'network'
self.cross_zone_load_balancing = module.params.get('cross_zone_load_balancing')
if self.elb is not None and self.elb['Type'] != 'network':
self.module.fail_json(msg="The load balancer type you are trying to manage is not network. Try elb_application_lb module instead.")
def create_elb(self):
"""
Create a load balancer
:return:
"""
# Required parameters
params = dict()
params['Name'] = self.name
params['Type'] = self.type
# Other parameters
if self.subnets is not None:
params['Subnets'] = self.subnets
if self.subnet_mappings is not None:
params['SubnetMappings'] = self.subnet_mappings
params['Scheme'] = self.scheme
if self.tags:
params['Tags'] = self.tags
try:
self.elb = AWSRetry.jittered_backoff()(self.connection.create_load_balancer)(**params)['LoadBalancers'][0]
self.changed = True
self.new_load_balancer = True
except (BotoCoreError, ClientError) as e:
self.module.fail_json_aws(e)
if self.wait:
self.wait_for_status(self.elb['LoadBalancerArn'])
def modify_elb_attributes(self):
"""
Update Network ELB attributes if required
:return:
"""
update_attributes = []
if self.cross_zone_load_balancing is not None and str(self.cross_zone_load_balancing).lower() != \
self.elb_attributes['load_balancing_cross_zone_enabled']:
update_attributes.append({'Key': 'load_balancing.cross_zone.enabled', 'Value': str(self.cross_zone_load_balancing).lower()})
if self.deletion_protection is not None and str(self.deletion_protection).lower() != self.elb_attributes['deletion_protection_enabled']:
update_attributes.append({'Key': 'deletion_protection.enabled', 'Value': str(self.deletion_protection).lower()})
if update_attributes:
try:
AWSRetry.jittered_backoff()(
self.connection.modify_load_balancer_attributes
)(LoadBalancerArn=self.elb['LoadBalancerArn'], Attributes=update_attributes)
self.changed = True
except (BotoCoreError, ClientError) as e:
# Something went wrong setting attributes. If this ELB was created during this task, delete it to leave a consistent state
if self.new_load_balancer:
AWSRetry.jittered_backoff()(self.connection.delete_load_balancer)(LoadBalancerArn=self.elb['LoadBalancerArn'])
self.module.fail_json_aws(e)
def modify_subnets(self):
"""
Modify elb subnets to match module parameters (unsupported for NLB)
:return:
"""
self.module.fail_json(msg='Modifying subnets and elastic IPs is not supported for Network Load Balancer')
class ELBListeners(object):
def __init__(self, connection, module, elb_arn):
self.connection = connection
self.module = module
self.elb_arn = elb_arn
listeners = module.params.get("listeners")
if listeners is not None:
# Remove suboption argspec defaults of None from each listener
listeners = [dict((x, listener_dict[x]) for x in listener_dict if listener_dict[x] is not None) for listener_dict in listeners]
self.listeners = self._ensure_listeners_default_action_has_arn(listeners)
self.current_listeners = self._get_elb_listeners()
self.purge_listeners = module.params.get("purge_listeners")
self.changed = False
def update(self):
"""
Update the listeners for the ELB
:return:
"""
self.current_listeners = self._get_elb_listeners()
def _get_elb_listeners(self):
"""
Get ELB listeners
:return:
"""
try:
listener_paginator = self.connection.get_paginator('describe_listeners')
return (AWSRetry.jittered_backoff()(listener_paginator.paginate)(LoadBalancerArn=self.elb_arn).build_full_result())['Listeners']
except (BotoCoreError, ClientError) as e:
self.module.fail_json_aws(e)
def _ensure_listeners_default_action_has_arn(self, listeners):
"""
If a listener DefaultAction has been passed with a Target Group Name instead of ARN, lookup the ARN and
replace the name.
:param listeners: a list of listener dicts
:return: the same list of dicts ensuring that each listener DefaultActions dict has TargetGroupArn key. If a TargetGroupName key exists, it is removed.
"""
if not listeners:
listeners = []
fixed_listeners = []
for listener in listeners:
fixed_actions = []
for action in listener['DefaultActions']:
if 'TargetGroupName' in action:
action['TargetGroupArn'] = convert_tg_name_to_arn(self.connection,
self.module,
action['TargetGroupName'])
del action['TargetGroupName']
fixed_actions.append(action)
listener['DefaultActions'] = fixed_actions
fixed_listeners.append(listener)
return fixed_listeners
def compare_listeners(self):
"""
:return:
"""
listeners_to_modify = []
listeners_to_delete = []
listeners_to_add = deepcopy(self.listeners)
# Check each current listener port to see if it's been passed to the module
for current_listener in self.current_listeners:
current_listener_passed_to_module = False
for new_listener in self.listeners[:]:
new_listener['Port'] = int(new_listener['Port'])
if current_listener['Port'] == new_listener['Port']:
current_listener_passed_to_module = True
# Remove what we match so that what is left can be marked as 'to be added'
listeners_to_add.remove(new_listener)
modified_listener = self._compare_listener(current_listener, new_listener)
if modified_listener:
modified_listener['Port'] = current_listener['Port']
modified_listener['ListenerArn'] = current_listener['ListenerArn']
listeners_to_modify.append(modified_listener)
break
# If the current listener was not matched against passed listeners and purge is True, mark for removal
if not current_listener_passed_to_module and self.purge_listeners:
listeners_to_delete.append(current_listener['ListenerArn'])
return listeners_to_add, listeners_to_modify, listeners_to_delete
def _compare_listener(self, current_listener, new_listener):
"""
Compare two listeners.
:param current_listener:
:param new_listener:
:return:
"""
modified_listener = {}
# Port
if current_listener['Port'] != new_listener['Port']:
modified_listener['Port'] = new_listener['Port']
# Protocol
if current_listener['Protocol'] != new_listener['Protocol']:
modified_listener['Protocol'] = new_listener['Protocol']
# If Protocol is HTTPS, check additional attributes
if current_listener['Protocol'] == 'HTTPS' and new_listener['Protocol'] == 'HTTPS':
# Cert
if current_listener['SslPolicy'] != new_listener['SslPolicy']:
modified_listener['SslPolicy'] = new_listener['SslPolicy']
if current_listener['Certificates'][0]['CertificateArn'] != new_listener['Certificates'][0]['CertificateArn']:
modified_listener['Certificates'] = []
modified_listener['Certificates'].append({})
modified_listener['Certificates'][0]['CertificateArn'] = new_listener['Certificates'][0]['CertificateArn']
elif current_listener['Protocol'] != 'HTTPS' and new_listener['Protocol'] == 'HTTPS':
modified_listener['SslPolicy'] = new_listener['SslPolicy']
modified_listener['Certificates'] = []
modified_listener['Certificates'].append({})
modified_listener['Certificates'][0]['CertificateArn'] = new_listener['Certificates'][0]['CertificateArn']
# Default action
# Check proper rule format on current listener
if len(current_listener['DefaultActions']) > 1:
for action in current_listener['DefaultActions']:
if 'Order' not in action:
self.module.fail_json(msg="'Order' key not found in actions. "
"installed version of botocore does not support "
"multiple actions, please upgrade botocore to version "
"1.10.30 or higher")
# If the lengths of the actions are the same, we'll have to verify that the
# contents of those actions are the same
if len(current_listener['DefaultActions']) == len(new_listener['DefaultActions']):
# if actions have just one element, compare the contents and then update if
# they're different
if len(current_listener['DefaultActions']) == 1 and len(new_listener['DefaultActions']) == 1:
if current_listener['DefaultActions'] != new_listener['DefaultActions']:
modified_listener['DefaultActions'] = new_listener['DefaultActions']
# if actions have multiple elements, we'll have to order them first before comparing.
# multiple actions will have an 'Order' key for this purpose
else:
current_actions_sorted = sorted(current_listener['DefaultActions'], key=lambda x: x['Order'])
new_actions_sorted = sorted(new_listener['DefaultActions'], key=lambda x: x['Order'])
# the AWS api won't return the client secret, so we'll have to remove it
# or the module will always see the new and current actions as different
# and try to apply the same config
new_actions_sorted_no_secret = []
for action in new_actions_sorted:
# the secret is currently only defined in the oidc config
if action['Type'] == 'authenticate-oidc':
action['AuthenticateOidcConfig'].pop('ClientSecret')
new_actions_sorted_no_secret.append(action)
else:
new_actions_sorted_no_secret.append(action)
if current_actions_sorted != new_actions_sorted_no_secret:
modified_listener['DefaultActions'] = new_listener['DefaultActions']
# If the action lengths are different, then replace with the new actions
else:
modified_listener['DefaultActions'] = new_listener['DefaultActions']
if modified_listener:
return modified_listener
else:
return None
class ELBListener(object):
def __init__(self, connection, module, listener, elb_arn):
"""
        :param connection: boto3 client connection for elbv2
        :param module: Ansible module object
        :param listener: dict describing the listener (or, when deleting, the listener ARN)
        :param elb_arn: ARN of the load balancer the listener belongs to
"""
self.connection = connection
self.module = module
self.listener = listener
self.elb_arn = elb_arn
def add(self):
try:
# Rules is not a valid parameter for create_listener
if 'Rules' in self.listener:
self.listener.pop('Rules')
AWSRetry.jittered_backoff()(self.connection.create_listener)(LoadBalancerArn=self.elb_arn, **self.listener)
except (BotoCoreError, ClientError) as e:
if '"Order", must be one of: Type, TargetGroupArn' in str(e):
self.module.fail_json(msg="installed version of botocore does not support "
"multiple actions, please upgrade botocore to version "
"1.10.30 or higher")
else:
self.module.fail_json_aws(e)
def modify(self):
try:
# Rules is not a valid parameter for modify_listener
if 'Rules' in self.listener:
self.listener.pop('Rules')
AWSRetry.jittered_backoff()(self.connection.modify_listener)(**self.listener)
except (BotoCoreError, ClientError) as e:
if '"Order", must be one of: Type, TargetGroupArn' in str(e):
self.module.fail_json(msg="installed version of botocore does not support "
"multiple actions, please upgrade botocore to version "
"1.10.30 or higher")
else:
self.module.fail_json_aws(e)
def delete(self):
try:
AWSRetry.jittered_backoff()(self.connection.delete_listener)(ListenerArn=self.listener)
except (BotoCoreError, ClientError) as e:
self.module.fail_json_aws(e)
class ELBListenerRules(object):
def __init__(self, connection, module, elb_arn, listener_rules, listener_port):
self.connection = connection
self.module = module
self.elb_arn = elb_arn
self.rules = self._ensure_rules_action_has_arn(listener_rules)
self.changed = False
# Get listener based on port so we can use ARN
self.current_listener = get_elb_listener(connection, module, elb_arn, listener_port)
self.listener_arn = self.current_listener['ListenerArn']
self.rules_to_add = deepcopy(self.rules)
self.rules_to_modify = []
self.rules_to_delete = []
# If the listener exists (i.e. has an ARN) get rules for the listener
if 'ListenerArn' in self.current_listener:
self.current_rules = self._get_elb_listener_rules()
else:
self.current_rules = []
def _ensure_rules_action_has_arn(self, rules):
"""
        If a rule Action has been passed with a Target Group Name instead of an ARN, look up the ARN and
        replace the name.
        :param rules: a list of rule dicts
        :return: the same list of dicts, ensuring that each rule's Actions dicts have a TargetGroupArn key. Any TargetGroupName key is removed.
"""
fixed_rules = []
for rule in rules:
fixed_actions = []
for action in rule['Actions']:
if 'TargetGroupName' in action:
action['TargetGroupArn'] = convert_tg_name_to_arn(self.connection, self.module, action['TargetGroupName'])
del action['TargetGroupName']
fixed_actions.append(action)
rule['Actions'] = fixed_actions
fixed_rules.append(rule)
return fixed_rules
def _get_elb_listener_rules(self):
try:
return AWSRetry.jittered_backoff()(self.connection.describe_rules)(ListenerArn=self.current_listener['ListenerArn'])['Rules']
except (BotoCoreError, ClientError) as e:
self.module.fail_json_aws(e)
def _compare_condition(self, current_conditions, condition):
"""
        :param current_conditions: list of condition dicts currently attached to the rule
        :param condition: condition dict passed to the module
        :return: True if an equivalent condition is already present, else False
"""
condition_found = False
for current_condition in current_conditions:
if current_condition.get('SourceIpConfig'):
if (current_condition['Field'] == condition['Field'] and
current_condition['SourceIpConfig']['Values'][0] == condition['SourceIpConfig']['Values'][0]):
condition_found = True
break
elif current_condition['Field'] == condition['Field'] and sorted(current_condition['Values']) == sorted(condition['Values']):
condition_found = True
break
return condition_found
def _compare_rule(self, current_rule, new_rule):
"""
        Compare a rule currently attached to the listener against a rule passed to the module.
        :return: dict of the attributes that differ (empty dict if the rules match)
"""
modified_rule = {}
# Priority
if int(current_rule['Priority']) != int(new_rule['Priority']):
modified_rule['Priority'] = new_rule['Priority']
# Actions
# Check proper rule format on current listener
if len(current_rule['Actions']) > 1:
for action in current_rule['Actions']:
if 'Order' not in action:
self.module.fail_json(msg="'Order' key not found in actions. "
"installed version of botocore does not support "
"multiple actions, please upgrade botocore to version "
"1.10.30 or higher")
# If the lengths of the actions are the same, we'll have to verify that the
# contents of those actions are the same
if len(current_rule['Actions']) == len(new_rule['Actions']):
# if actions have just one element, compare the contents and then update if
# they're different
if len(current_rule['Actions']) == 1 and len(new_rule['Actions']) == 1:
if current_rule['Actions'] != new_rule['Actions']:
modified_rule['Actions'] = new_rule['Actions']
# if actions have multiple elements, we'll have to order them first before comparing.
# multiple actions will have an 'Order' key for this purpose
else:
current_actions_sorted = sorted(current_rule['Actions'], key=lambda x: x['Order'])
new_actions_sorted = sorted(new_rule['Actions'], key=lambda x: x['Order'])
# the AWS api won't return the client secret, so we'll have to remove it
# or the module will always see the new and current actions as different
# and try to apply the same config
new_actions_sorted_no_secret = []
for action in new_actions_sorted:
# the secret is currently only defined in the oidc config
if action['Type'] == 'authenticate-oidc':
action['AuthenticateOidcConfig'].pop('ClientSecret')
new_actions_sorted_no_secret.append(action)
else:
new_actions_sorted_no_secret.append(action)
if current_actions_sorted != new_actions_sorted_no_secret:
modified_rule['Actions'] = new_rule['Actions']
# If the action lengths are different, then replace with the new actions
else:
modified_rule['Actions'] = new_rule['Actions']
# Conditions
modified_conditions = []
for condition in new_rule['Conditions']:
if not self._compare_condition(current_rule['Conditions'], condition):
modified_conditions.append(condition)
if modified_conditions:
modified_rule['Conditions'] = modified_conditions
return modified_rule
def compare_rules(self):
"""
        Compare the rules passed to the module against the listener's current rules.
        :return: tuple of (rules_to_add, rules_to_modify, rules_to_delete)
"""
rules_to_modify = []
rules_to_delete = []
rules_to_add = deepcopy(self.rules)
for current_rule in self.current_rules:
current_rule_passed_to_module = False
for new_rule in self.rules[:]:
if current_rule['Priority'] == str(new_rule['Priority']):
current_rule_passed_to_module = True
# Remove what we match so that what is left can be marked as 'to be added'
rules_to_add.remove(new_rule)
modified_rule = self._compare_rule(current_rule, new_rule)
if modified_rule:
modified_rule['Priority'] = int(current_rule['Priority'])
modified_rule['RuleArn'] = current_rule['RuleArn']
modified_rule['Actions'] = new_rule['Actions']
modified_rule['Conditions'] = new_rule['Conditions']
rules_to_modify.append(modified_rule)
break
# If the current rule was not matched against passed rules, mark for removal
if not current_rule_passed_to_module and not current_rule['IsDefault']:
rules_to_delete.append(current_rule['RuleArn'])
return rules_to_add, rules_to_modify, rules_to_delete
class ELBListenerRule(object):
def __init__(self, connection, module, rule, listener_arn):
self.connection = connection
self.module = module
self.rule = rule
self.listener_arn = listener_arn
self.changed = False
def create(self):
"""
Create a listener rule
:return:
"""
try:
self.rule['ListenerArn'] = self.listener_arn
self.rule['Priority'] = int(self.rule['Priority'])
AWSRetry.jittered_backoff()(self.connection.create_rule)(**self.rule)
except (BotoCoreError, ClientError) as e:
if '"Order", must be one of: Type, TargetGroupArn' in str(e):
self.module.fail_json(msg="installed version of botocore does not support "
"multiple actions, please upgrade botocore to version "
"1.10.30 or higher")
else:
self.module.fail_json_aws(e)
self.changed = True
def modify(self):
"""
Modify a listener rule
:return:
"""
try:
del self.rule['Priority']
AWSRetry.jittered_backoff()(self.connection.modify_rule)(**self.rule)
except (BotoCoreError, ClientError) as e:
if '"Order", must be one of: Type, TargetGroupArn' in str(e):
self.module.fail_json(msg="installed version of botocore does not support "
"multiple actions, please upgrade botocore to version "
"1.10.30 or higher")
else:
self.module.fail_json_aws(e)
self.changed = True
def delete(self):
"""
Delete a listener rule
:return:
"""
try:
AWSRetry.jittered_backoff()(self.connection.delete_rule)(RuleArn=self.rule['RuleArn'])
except (BotoCoreError, ClientError) as e:
self.module.fail_json_aws(e)
self.changed = True
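# --- Hypothetical usage sketch (not part of the original file) ---
# Shows how a module such as elb_application_lb typically drives the
# compare/add/modify/delete machinery above; `connection`, `module` and
# `elb_arn` are assumed to come from the caller, and the class containing
# compare_listeners() is assumed to be named ELBListeners as upstream.
#
# listeners_obj = ELBListeners(connection, module, elb_arn)
# to_add, to_modify, to_delete = listeners_obj.compare_listeners()
# for listener in to_add:
#     ELBListener(connection, module, listener, elb_arn).add()
# for listener in to_modify:
#     ELBListener(connection, module, listener, elb_arn).modify()
# for listener_arn in to_delete:
#     ELBListener(connection, module, listener_arn, elb_arn).delete()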

@ -1,49 +0,0 @@
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import traceback
try:
from botocore.exceptions import ClientError, NoCredentialsError
except ImportError:
pass # caught by HAS_BOTO3
from ansible.module_utils._text import to_native
def get_aws_account_id(module):
""" Given AnsibleAWSModule instance, get the active AWS account ID
get_account_id tries too find out the account that we are working
on. It's not guaranteed that this will be easy so we try in
several different ways. Giving either IAM or STS privilages to
the account should be enough to permit this.
"""
account_id = None
try:
sts_client = module.client('sts')
account_id = sts_client.get_caller_identity().get('Account')
# non-STS sessions may also get NoCredentialsError from this STS call, so
# we must catch that too and try the IAM version
except (ClientError, NoCredentialsError):
try:
iam_client = module.client('iam')
account_id = iam_client.get_user()['User']['Arn'].split(':')[4]
except ClientError as e:
if (e.response['Error']['Code'] == 'AccessDenied'):
except_msg = to_native(e)
# don't match on `arn:aws` because of China region `arn:aws-cn` and similar
                account_id = re.search(r"arn:\w+:iam::([0-9]{12,32}):\w+/", except_msg).group(1)
if account_id is None:
module.fail_json_aws(e, msg="Could not get AWS account information")
except Exception as e:
module.fail_json(
msg="Failed to get AWS account information, Try allowing sts:GetCallerIdentity or iam:GetUser permissions.",
exception=traceback.format_exc()
)
if not account_id:
module.fail_json(msg="Failed while determining AWS account ID. Try allowing sts:GetCallerIdentity or iam:GetUser permissions.")
return to_native(account_id)
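# Illustrative sketch (not part of the original file): typical usage from a
# module that needs the account ID to build an ARN; `module` is assumed to
# be an AnsibleAWSModule instance and the role name is made up.
#
# account_id = get_aws_account_id(module)
# role_arn = 'arn:aws:iam::{0}:role/my-role'.format(account_id)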

@ -1,232 +0,0 @@
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils._text import to_text
from ansible.module_utils.aws.waiters import get_waiter
from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
from ansible.module_utils.ec2 import compare_aws_tags, AWSRetry, ansible_dict_to_boto3_tag_list, boto3_tag_list_to_ansible_dict
try:
from botocore.exceptions import BotoCoreError, ClientError, WaiterError
except ImportError:
pass
from collections import namedtuple
from time import sleep
Boto3ClientMethod = namedtuple('Boto3ClientMethod', ['name', 'waiter', 'operation_description', 'cluster', 'instance'])
# Whitelist boto3 client methods for cluster and instance resources
cluster_method_names = [
'create_db_cluster', 'restore_db_cluster_from_db_snapshot', 'restore_db_cluster_from_s3',
'restore_db_cluster_to_point_in_time', 'modify_db_cluster', 'delete_db_cluster', 'add_tags_to_resource',
'remove_tags_from_resource', 'list_tags_for_resource', 'promote_read_replica_db_cluster'
]
instance_method_names = [
'create_db_instance', 'restore_db_instance_to_point_in_time', 'restore_db_instance_from_s3',
'restore_db_instance_from_db_snapshot', 'create_db_instance_read_replica', 'modify_db_instance',
'delete_db_instance', 'add_tags_to_resource', 'remove_tags_from_resource', 'list_tags_for_resource',
'promote_read_replica', 'stop_db_instance', 'start_db_instance', 'reboot_db_instance'
]
def get_rds_method_attribute(method_name, module):
readable_op = method_name.replace('_', ' ').replace('db', 'DB')
if method_name in cluster_method_names and 'new_db_cluster_identifier' in module.params:
cluster = True
instance = False
if method_name == 'delete_db_cluster':
waiter = 'cluster_deleted'
else:
waiter = 'cluster_available'
elif method_name in instance_method_names and 'new_db_instance_identifier' in module.params:
cluster = False
instance = True
if method_name == 'delete_db_instance':
waiter = 'db_instance_deleted'
elif method_name == 'stop_db_instance':
waiter = 'db_instance_stopped'
else:
waiter = 'db_instance_available'
else:
raise NotImplementedError("method {0} hasn't been added to the list of accepted methods to use a waiter in module_utils/aws/rds.py".format(method_name))
return Boto3ClientMethod(name=method_name, waiter=waiter, operation_description=readable_op, cluster=cluster, instance=instance)
def get_final_identifier(method_name, module):
apply_immediately = module.params['apply_immediately']
if get_rds_method_attribute(method_name, module).cluster:
identifier = module.params['db_cluster_identifier']
updated_identifier = module.params['new_db_cluster_identifier']
elif get_rds_method_attribute(method_name, module).instance:
identifier = module.params['db_instance_identifier']
updated_identifier = module.params['new_db_instance_identifier']
else:
raise NotImplementedError("method {0} hasn't been added to the list of accepted methods in module_utils/aws/rds.py".format(method_name))
if not module.check_mode and updated_identifier and apply_immediately:
identifier = updated_identifier
return identifier
def handle_errors(module, exception, method_name, parameters):
if not isinstance(exception, ClientError):
module.fail_json_aws(exception, msg="Unexpected failure for method {0} with parameters {1}".format(method_name, parameters))
changed = True
error_code = exception.response['Error']['Code']
if method_name == 'modify_db_instance' and error_code == 'InvalidParameterCombination':
if 'No modifications were requested' in to_text(exception):
changed = False
elif 'ModifyDbCluster API' in to_text(exception):
module.fail_json_aws(exception, msg='It appears you are trying to modify attributes that are managed at the cluster level. Please see rds_cluster')
else:
module.fail_json_aws(exception, msg='Unable to {0}'.format(get_rds_method_attribute(method_name, module).operation_description))
elif method_name == 'promote_read_replica' and error_code == 'InvalidDBInstanceState':
if 'DB Instance is not a read replica' in to_text(exception):
changed = False
else:
module.fail_json_aws(exception, msg='Unable to {0}'.format(get_rds_method_attribute(method_name, module).operation_description))
    elif method_name == 'create_db_instance' and error_code == 'InvalidParameterValue':
accepted_engines = [
'aurora', 'aurora-mysql', 'aurora-postgresql', 'mariadb', 'mysql', 'oracle-ee', 'oracle-se',
'oracle-se1', 'oracle-se2', 'postgres', 'sqlserver-ee', 'sqlserver-ex', 'sqlserver-se', 'sqlserver-web'
]
if parameters.get('Engine') not in accepted_engines:
module.fail_json_aws(exception, msg='DB engine {0} should be one of {1}'.format(parameters.get('Engine'), accepted_engines))
else:
module.fail_json_aws(exception, msg='Unable to {0}'.format(get_rds_method_attribute(method_name, module).operation_description))
else:
module.fail_json_aws(exception, msg='Unable to {0}'.format(get_rds_method_attribute(method_name, module).operation_description))
return changed
def call_method(client, module, method_name, parameters):
result = {}
changed = True
if not module.check_mode:
wait = module.params['wait']
# TODO: stabilize by adding get_rds_method_attribute(method_name).extra_retry_codes
method = getattr(client, method_name)
try:
if method_name == 'modify_db_instance':
# check if instance is in an available state first, if possible
if wait:
wait_for_status(client, module, module.params['db_instance_identifier'], method_name)
result = AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidDBInstanceState'])(method)(**parameters)
else:
result = AWSRetry.jittered_backoff()(method)(**parameters)
except (BotoCoreError, ClientError) as e:
changed = handle_errors(module, e, method_name, parameters)
if wait and changed:
identifier = get_final_identifier(method_name, module)
wait_for_status(client, module, identifier, method_name)
return result, changed
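# Hypothetical usage sketch (not in the original file): modules such as
# rds_instance funnel every boto3 RDS call through call_method() so retries,
# waiting and error translation live in one place. This assumes the module's
# argument spec defines wait, apply_immediately and
# new_db_instance_identifier, as rds_instance does.
#
# client = module.client('rds')
# result, changed = call_method(
#     client, module, method_name='stop_db_instance',
#     parameters={'DBInstanceIdentifier': module.params['db_instance_identifier']})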
def wait_for_instance_status(client, module, db_instance_id, waiter_name):
def wait(client, db_instance_id, waiter_name, extra_retry_codes):
retry = AWSRetry.jittered_backoff(catch_extra_error_codes=extra_retry_codes)
try:
waiter = client.get_waiter(waiter_name)
except ValueError:
# using a waiter in ansible.module_utils.aws.waiters
waiter = get_waiter(client, waiter_name)
waiter.wait(WaiterConfig={'Delay': 60, 'MaxAttempts': 60}, DBInstanceIdentifier=db_instance_id)
waiter_expected_status = {
'db_instance_deleted': 'deleted',
'db_instance_stopped': 'stopped',
}
expected_status = waiter_expected_status.get(waiter_name, 'available')
if expected_status == 'available':
extra_retry_codes = ['DBInstanceNotFound']
else:
extra_retry_codes = []
for attempt_to_wait in range(0, 10):
try:
wait(client, db_instance_id, waiter_name, extra_retry_codes)
break
except WaiterError as e:
# Instance may be renamed and AWSRetry doesn't handle WaiterError
if e.last_response.get('Error', {}).get('Code') == 'DBInstanceNotFound':
sleep(10)
continue
module.fail_json_aws(e, msg='Error while waiting for DB instance {0} to be {1}'.format(db_instance_id, expected_status))
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg='Unexpected error while waiting for DB instance {0} to be {1}'.format(
db_instance_id, expected_status)
)
def wait_for_cluster_status(client, module, db_cluster_id, waiter_name):
try:
waiter = get_waiter(client, waiter_name).wait(DBClusterIdentifier=db_cluster_id)
except WaiterError as e:
if waiter_name == 'cluster_deleted':
msg = "Failed to wait for DB cluster {0} to be deleted".format(db_cluster_id)
else:
msg = "Failed to wait for DB cluster {0} to be available".format(db_cluster_id)
module.fail_json_aws(e, msg=msg)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Failed with an unexpected error while waiting for the DB cluster {0}".format(db_cluster_id))
def wait_for_status(client, module, identifier, method_name):
waiter_name = get_rds_method_attribute(method_name, module).waiter
if get_rds_method_attribute(method_name, module).cluster:
wait_for_cluster_status(client, module, identifier, waiter_name)
elif get_rds_method_attribute(method_name, module).instance:
wait_for_instance_status(client, module, identifier, waiter_name)
else:
raise NotImplementedError("method {0} hasn't been added to the whitelist of handled methods".format(method_name))
def get_tags(client, module, cluster_arn):
try:
return boto3_tag_list_to_ansible_dict(
client.list_tags_for_resource(ResourceName=cluster_arn)['TagList']
)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Unable to describe tags")
def arg_spec_to_rds_params(options_dict):
tags = options_dict.pop('tags')
has_processor_features = False
if 'processor_features' in options_dict:
has_processor_features = True
processor_features = options_dict.pop('processor_features')
camel_options = snake_dict_to_camel_dict(options_dict, capitalize_first=True)
for key in list(camel_options.keys()):
for old, new in (('Db', 'DB'), ('Iam', 'IAM'), ('Az', 'AZ')):
if old in key:
camel_options[key.replace(old, new)] = camel_options.pop(key)
camel_options['Tags'] = tags
if has_processor_features:
camel_options['ProcessorFeatures'] = processor_features
return camel_options
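# Worked example (not in the original file) of the fix-ups above; note the
# Db -> DB and Iam -> IAM replacements applied after the camelization:
#
# arg_spec_to_rds_params({'db_instance_identifier': 'test',
#                         'enable_iam_database_authentication': True,
#                         'tags': {'env': 'dev'}})
# returns {'DBInstanceIdentifier': 'test',
#          'EnableIAMDatabaseAuthentication': True,
#          'Tags': {'env': 'dev'}}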
def ensure_tags(client, module, resource_arn, existing_tags, tags, purge_tags):
if tags is None:
return False
tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, tags, purge_tags)
changed = bool(tags_to_add or tags_to_remove)
if tags_to_add:
call_method(
client, module, method_name='add_tags_to_resource',
parameters={'ResourceName': resource_arn, 'Tags': ansible_dict_to_boto3_tag_list(tags_to_add)}
)
if tags_to_remove:
call_method(
client, module, method_name='remove_tags_from_resource',
parameters={'ResourceName': resource_arn, 'TagKeys': tags_to_remove}
)
return changed
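# Illustration (not in the original file) of the compare step used above:
#
# compare_aws_tags({'env': 'dev', 'owner': 'alice'}, {'env': 'prod'}, purge_tags=True)
# returns ({'env': 'prod'}, ['owner']) -- 'env' is updated and, because
# purge_tags is true, the unmanaged 'owner' tag is scheduled for removal.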

@ -1,50 +0,0 @@
# Copyright (c) 2018 Red Hat, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
try:
from botocore.exceptions import BotoCoreError, ClientError
except ImportError:
pass # Handled by the calling module
HAS_MD5 = True
try:
from hashlib import md5
except ImportError:
try:
from md5 import md5
except ImportError:
HAS_MD5 = False
def calculate_etag(module, filename, etag, s3, bucket, obj, version=None):
if not HAS_MD5:
return None
if '-' in etag:
# Multi-part ETag; a hash of the hashes of each part.
parts = int(etag[1:-1].split('-')[1])
digests = []
s3_kwargs = dict(
Bucket=bucket,
Key=obj,
)
if version:
s3_kwargs['VersionId'] = version
with open(filename, 'rb') as f:
for part_num in range(1, parts + 1):
s3_kwargs['PartNumber'] = part_num
try:
head = s3.head_object(**s3_kwargs)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Failed to get head object")
digests.append(md5(f.read(int(head['ContentLength']))))
digest_squared = md5(b''.join(m.digest() for m in digests))
return '"{0}-{1}"'.format(digest_squared.hexdigest(), len(digests))
else: # Compute the MD5 sum normally
return '"{0}"'.format(module.md5(filename))

@ -1,210 +0,0 @@
# Copyright: (c) 2018, Aaron Haaf <aabonh@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import datetime
import hashlib
import hmac
import operator
from ansible.module_utils.urls import open_url
from ansible.module_utils.ec2 import boto3_conn, get_aws_connection_info, HAS_BOTO3
from ansible.module_utils.six.moves.urllib.parse import urlencode
try:
from boto3 import session
except ImportError:
pass
def hexdigest(s):
"""
Returns the sha256 hexdigest of a string after encoding.
"""
return hashlib.sha256(s.encode("utf-8")).hexdigest()
def format_querystring(params=None):
"""
    Returns a properly url-encoded query string from the provided params dict.
    The parameters are sorted by name, as required for canonical requests.
"""
if not params:
return ""
# Query string values must be URL-encoded (space=%20). The parameters must be sorted by name.
    return urlencode(sorted(params.items(), key=operator.itemgetter(0)))
# Key derivation functions. See:
# http://docs.aws.amazon.com/general/latest/gr/signature-v4-examples.html#signature-v4-examples-python
def sign(key, msg):
'''
Return digest for key applied to msg
'''
return hmac.new(key, msg.encode("utf-8"), hashlib.sha256).digest()
def get_signature_key(key, dateStamp, regionName, serviceName):
'''
Returns signature key for AWS resource
'''
kDate = sign(("AWS4" + key).encode("utf-8"), dateStamp)
kRegion = sign(kDate, regionName)
kService = sign(kRegion, serviceName)
kSigning = sign(kService, "aws4_request")
return kSigning
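# Quick sketch (not part of the original file) of how the helpers above
# combine; the access key and date are AWS's documented sample values, not
# real credentials.
#
# signing_key = get_signature_key(
#     "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY", "20120215", "us-east-1", "iam")
# format_querystring({"Version": "2010-05-08", "Action": "ListUsers"})
# returns "Action=ListUsers&Version=2010-05-08"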
def get_aws_credentials_object(module):
'''
Returns aws_access_key_id, aws_secret_access_key, session_token for a module.
'''
if not HAS_BOTO3:
module.fail_json("get_aws_credentials_object requires boto3")
dummy, dummy, boto_params = get_aws_connection_info(module, boto3=True)
s = session.Session(**boto_params)
return s.get_credentials()
# Reference: https://docs.aws.amazon.com/general/latest/gr/sigv4-signed-request-examples.html
def signed_request(
module=None,
method="GET", service=None, host=None, uri=None,
query=None, body="", headers=None,
session_in_header=True, session_in_query=False
):
"""Generate a SigV4 request to an AWS resource for a module
    This is used if you wish to authenticate with AWS credentials against a secure endpoint such as an Elasticsearch domain.
Returns :class:`HTTPResponse` object.
Example:
result = signed_request(
            module=module,
service="es",
host="search-recipes1-xxxxxxxxx.us-west-2.es.amazonaws.com",
)
:kwarg host: endpoint to talk to
:kwarg service: AWS id of service (like `ec2` or `es`)
:kwarg module: An AnsibleAWSModule to gather connection info from
:kwarg body: (optional) Payload to send
:kwarg method: (optional) HTTP verb to use
:kwarg query: (optional) dict of query params to handle
:kwarg uri: (optional) Resource path without query parameters
:kwarg session_in_header: (optional) Add the session token to the headers
:kwarg session_in_query: (optional) Add the session token to the query parameters
:returns: HTTPResponse
"""
if not HAS_BOTO3:
module.fail_json("A sigv4 signed_request requires boto3")
# "Constants"
t = datetime.datetime.utcnow()
amz_date = t.strftime("%Y%m%dT%H%M%SZ")
datestamp = t.strftime("%Y%m%d") # Date w/o time, used in credential scope
algorithm = "AWS4-HMAC-SHA256"
# AWS stuff
region, dummy, dummy = get_aws_connection_info(module, boto3=True)
credentials = get_aws_credentials_object(module)
access_key = credentials.access_key
secret_key = credentials.secret_key
session_token = credentials.token
if not access_key:
module.fail_json(msg="aws_access_key_id is missing")
if not secret_key:
module.fail_json(msg="aws_secret_access_key is missing")
credential_scope = "/".join([datestamp, region, service, "aws4_request"])
# Argument Defaults
uri = uri or "/"
query_string = format_querystring(query) if query else ""
headers = headers or dict()
query = query or dict()
headers.update({
"host": host,
"x-amz-date": amz_date,
})
# Handle adding of session_token if present
if session_token:
if session_in_header:
headers["X-Amz-Security-Token"] = session_token
if session_in_query:
query["X-Amz-Security-Token"] = session_token
if method == "GET":
body = ""
# Derived data
body_hash = hexdigest(body)
signed_headers = ";".join(sorted(headers.keys()))
    # Set up the canonical request to generate the auth token
    canonical_headers = "\n".join([
        key.lower().strip() + ":" + value for key, value in headers.items()
    ]) + "\n"  # Note additional trailing newline
    canonical_request = "\n".join([
        method,
        uri,
        query_string,
        canonical_headers,
        signed_headers,
        body_hash,
    ])
    string_to_sign = "\n".join([algorithm, amz_date, credential_scope, hexdigest(canonical_request)])
    # Sign the canonical request
signing_key = get_signature_key(secret_key, datestamp, region, service)
signature = hmac.new(signing_key, string_to_sign.encode("utf-8"), hashlib.sha256).hexdigest()
# Make auth header with that info
authorization_header = "{0} Credential={1}/{2}, SignedHeaders={3}, Signature={4}".format(
algorithm, access_key, credential_scope, signed_headers, signature
)
# PERFORM THE REQUEST!
url = "https://" + host + uri
if query_string != "":
url = url + "?" + query_string
final_headers = {
"x-amz-date": amz_date,
"Authorization": authorization_header,
}
final_headers.update(headers)
return open_url(url, method=method, data=body, headers=final_headers)
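# Hypothetical usage (not part of the original file): querying an
# Elasticsearch domain with SigV4 auth. The host is a made-up example
# endpoint and `module` is assumed to be an AnsibleAWSModule.
#
# response = signed_request(
#     module=module,
#     service="es",
#     host="search-mydomain-abc123.us-east-1.es.amazonaws.com",
#     uri="/_cluster/health",
# )
# health = response.read()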

@ -1,222 +0,0 @@
# Copyright (c) 2017 Will Thames
#
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""
This module adds shared support for Web Application Firewall modules
"""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils.ec2 import camel_dict_to_snake_dict, AWSRetry
from ansible.module_utils.aws.waiters import get_waiter
try:
import botocore
except ImportError:
pass # caught by imported HAS_BOTO3
MATCH_LOOKUP = {
'byte': {
'method': 'byte_match_set',
'conditionset': 'ByteMatchSet',
'conditiontuple': 'ByteMatchTuple',
'type': 'ByteMatch'
},
'geo': {
'method': 'geo_match_set',
'conditionset': 'GeoMatchSet',
'conditiontuple': 'GeoMatchConstraint',
'type': 'GeoMatch'
},
'ip': {
'method': 'ip_set',
'conditionset': 'IPSet',
'conditiontuple': 'IPSetDescriptor',
'type': 'IPMatch'
},
'regex': {
'method': 'regex_match_set',
'conditionset': 'RegexMatchSet',
'conditiontuple': 'RegexMatchTuple',
'type': 'RegexMatch'
},
'size': {
'method': 'size_constraint_set',
'conditionset': 'SizeConstraintSet',
'conditiontuple': 'SizeConstraint',
'type': 'SizeConstraint'
},
'sql': {
'method': 'sql_injection_match_set',
'conditionset': 'SqlInjectionMatchSet',
'conditiontuple': 'SqlInjectionMatchTuple',
'type': 'SqlInjectionMatch',
},
'xss': {
'method': 'xss_match_set',
'conditionset': 'XssMatchSet',
'conditiontuple': 'XssMatchTuple',
'type': 'XssMatch'
},
}
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def get_rule_with_backoff(client, rule_id):
return client.get_rule(RuleId=rule_id)['Rule']
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def get_byte_match_set_with_backoff(client, byte_match_set_id):
return client.get_byte_match_set(ByteMatchSetId=byte_match_set_id)['ByteMatchSet']
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def get_ip_set_with_backoff(client, ip_set_id):
return client.get_ip_set(IPSetId=ip_set_id)['IPSet']
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def get_size_constraint_set_with_backoff(client, size_constraint_set_id):
return client.get_size_constraint_set(SizeConstraintSetId=size_constraint_set_id)['SizeConstraintSet']
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def get_sql_injection_match_set_with_backoff(client, sql_injection_match_set_id):
return client.get_sql_injection_match_set(SqlInjectionMatchSetId=sql_injection_match_set_id)['SqlInjectionMatchSet']
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def get_xss_match_set_with_backoff(client, xss_match_set_id):
return client.get_xss_match_set(XssMatchSetId=xss_match_set_id)['XssMatchSet']
def get_rule(client, module, rule_id):
try:
rule = get_rule_with_backoff(client, rule_id)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't obtain waf rule")
match_sets = {
'ByteMatch': get_byte_match_set_with_backoff,
'IPMatch': get_ip_set_with_backoff,
'SizeConstraint': get_size_constraint_set_with_backoff,
'SqlInjectionMatch': get_sql_injection_match_set_with_backoff,
'XssMatch': get_xss_match_set_with_backoff
}
if 'Predicates' in rule:
for predicate in rule['Predicates']:
if predicate['Type'] in match_sets:
predicate.update(match_sets[predicate['Type']](client, predicate['DataId']))
                    # DataId is replaced by the Id from the relevant MatchSet
                    del predicate['DataId']
return rule
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def get_web_acl_with_backoff(client, web_acl_id):
return client.get_web_acl(WebACLId=web_acl_id)['WebACL']
def get_web_acl(client, module, web_acl_id):
try:
web_acl = get_web_acl_with_backoff(client, web_acl_id)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't obtain web acl")
if web_acl:
try:
for rule in web_acl['Rules']:
rule.update(get_rule(client, module, rule['RuleId']))
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't obtain web acl rule")
return camel_dict_to_snake_dict(web_acl)
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def list_rules_with_backoff(client):
paginator = client.get_paginator('list_rules')
return paginator.paginate().build_full_result()['Rules']
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def list_regional_rules_with_backoff(client):
resp = client.list_rules()
rules = []
while resp:
rules += resp['Rules']
resp = client.list_rules(NextMarker=resp['NextMarker']) if 'NextMarker' in resp else None
return rules
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def list_web_acls_with_backoff(client):
paginator = client.get_paginator('list_web_acls')
return paginator.paginate().build_full_result()['WebACLs']
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def list_regional_web_acls_with_backoff(client):
resp = client.list_web_acls()
acls = []
while resp:
acls += resp['WebACLs']
resp = client.list_web_acls(NextMarker=resp['NextMarker']) if 'NextMarker' in resp else None
return acls
def list_web_acls(client, module):
try:
if client.__class__.__name__ == 'WAF':
return list_web_acls_with_backoff(client)
elif client.__class__.__name__ == 'WAFRegional':
return list_regional_web_acls_with_backoff(client)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't obtain web acls")
def get_change_token(client, module):
try:
token = client.get_change_token()
return token['ChangeToken']
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't obtain change token")
@AWSRetry.backoff(tries=10, delay=2, backoff=2.0, catch_extra_error_codes=['WAFStaleDataException'])
def run_func_with_change_token_backoff(client, module, params, func, wait=False):
params['ChangeToken'] = get_change_token(client, module)
result = func(**params)
if wait:
get_waiter(
client, 'change_token_in_sync',
).wait(
ChangeToken=result['ChangeToken']
)
return result
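# Hypothetical usage sketch (not in the original file): every WAF mutation
# needs a fresh ChangeToken, so module code funnels boto3 calls through
# run_func_with_change_token_backoff(); the IP set name here is made up.
#
# result = run_func_with_change_token_backoff(
#     client, module,
#     {'Name': 'my-ip-set'},      # ChangeToken is injected automatically
#     client.create_ip_set,
#     wait=True)                  # block until the change reports INSYNC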

@ -1,405 +0,0 @@
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
try:
import botocore.waiter as core_waiter
except ImportError:
pass # caught by HAS_BOTO3
ec2_data = {
"version": 2,
"waiters": {
"InternetGatewayExists": {
"delay": 5,
"maxAttempts": 40,
"operation": "DescribeInternetGateways",
"acceptors": [
{
"matcher": "path",
"expected": True,
"argument": "length(InternetGateways) > `0`",
"state": "success"
},
{
"matcher": "error",
"expected": "InvalidInternetGatewayID.NotFound",
"state": "retry"
},
]
},
"RouteTableExists": {
"delay": 5,
"maxAttempts": 40,
"operation": "DescribeRouteTables",
"acceptors": [
{
"matcher": "path",
"expected": True,
"argument": "length(RouteTables[]) > `0`",
"state": "success"
},
{
"matcher": "error",
"expected": "InvalidRouteTableID.NotFound",
"state": "retry"
},
]
},
"SecurityGroupExists": {
"delay": 5,
"maxAttempts": 40,
"operation": "DescribeSecurityGroups",
"acceptors": [
{
"matcher": "path",
"expected": True,
"argument": "length(SecurityGroups[]) > `0`",
"state": "success"
},
{
"matcher": "error",
"expected": "InvalidGroup.NotFound",
"state": "retry"
},
]
},
"SubnetExists": {
"delay": 5,
"maxAttempts": 40,
"operation": "DescribeSubnets",
"acceptors": [
{
"matcher": "path",
"expected": True,
"argument": "length(Subnets[]) > `0`",
"state": "success"
},
{
"matcher": "error",
"expected": "InvalidSubnetID.NotFound",
"state": "retry"
},
]
},
"SubnetHasMapPublic": {
"delay": 5,
"maxAttempts": 40,
"operation": "DescribeSubnets",
"acceptors": [
{
"matcher": "pathAll",
"expected": True,
"argument": "Subnets[].MapPublicIpOnLaunch",
"state": "success"
},
]
},
"SubnetNoMapPublic": {
"delay": 5,
"maxAttempts": 40,
"operation": "DescribeSubnets",
"acceptors": [
{
"matcher": "pathAll",
"expected": False,
"argument": "Subnets[].MapPublicIpOnLaunch",
"state": "success"
},
]
},
"SubnetHasAssignIpv6": {
"delay": 5,
"maxAttempts": 40,
"operation": "DescribeSubnets",
"acceptors": [
{
"matcher": "pathAll",
"expected": True,
"argument": "Subnets[].AssignIpv6AddressOnCreation",
"state": "success"
},
]
},
"SubnetNoAssignIpv6": {
"delay": 5,
"maxAttempts": 40,
"operation": "DescribeSubnets",
"acceptors": [
{
"matcher": "pathAll",
"expected": False,
"argument": "Subnets[].AssignIpv6AddressOnCreation",
"state": "success"
},
]
},
"SubnetDeleted": {
"delay": 5,
"maxAttempts": 40,
"operation": "DescribeSubnets",
"acceptors": [
{
"matcher": "path",
"expected": True,
"argument": "length(Subnets[]) > `0`",
"state": "retry"
},
{
"matcher": "error",
"expected": "InvalidSubnetID.NotFound",
"state": "success"
},
]
},
"VpnGatewayExists": {
"delay": 5,
"maxAttempts": 40,
"operation": "DescribeVpnGateways",
"acceptors": [
{
"matcher": "path",
"expected": True,
"argument": "length(VpnGateways[]) > `0`",
"state": "success"
},
{
"matcher": "error",
"expected": "InvalidVpnGatewayID.NotFound",
"state": "retry"
},
]
},
"VpnGatewayDetached": {
"delay": 5,
"maxAttempts": 40,
"operation": "DescribeVpnGateways",
"acceptors": [
{
"matcher": "path",
"expected": True,
"argument": "VpnGateways[0].State == 'available'",
"state": "success"
},
]
},
}
}
waf_data = {
"version": 2,
"waiters": {
"ChangeTokenInSync": {
"delay": 20,
"maxAttempts": 60,
"operation": "GetChangeTokenStatus",
"acceptors": [
{
"matcher": "path",
"expected": True,
"argument": "ChangeTokenStatus == 'INSYNC'",
"state": "success"
},
{
"matcher": "error",
"expected": "WAFInternalErrorException",
"state": "retry"
}
]
}
}
}
eks_data = {
"version": 2,
"waiters": {
"ClusterActive": {
"delay": 20,
"maxAttempts": 60,
"operation": "DescribeCluster",
"acceptors": [
{
"state": "success",
"matcher": "path",
"argument": "cluster.status",
"expected": "ACTIVE"
},
{
"state": "retry",
"matcher": "error",
"expected": "ResourceNotFoundException"
}
]
},
"ClusterDeleted": {
"delay": 20,
"maxAttempts": 60,
"operation": "DescribeCluster",
"acceptors": [
{
"state": "retry",
"matcher": "path",
"argument": "cluster.status != 'DELETED'",
"expected": True
},
{
"state": "success",
"matcher": "error",
"expected": "ResourceNotFoundException"
}
]
}
}
}
rds_data = {
"version": 2,
"waiters": {
"DBInstanceStopped": {
"delay": 20,
"maxAttempts": 60,
"operation": "DescribeDBInstances",
"acceptors": [
{
"state": "success",
"matcher": "pathAll",
"argument": "DBInstances[].DBInstanceStatus",
"expected": "stopped"
},
]
}
}
}
def ec2_model(name):
ec2_models = core_waiter.WaiterModel(waiter_config=ec2_data)
return ec2_models.get_waiter(name)
def waf_model(name):
waf_models = core_waiter.WaiterModel(waiter_config=waf_data)
return waf_models.get_waiter(name)
def eks_model(name):
eks_models = core_waiter.WaiterModel(waiter_config=eks_data)
return eks_models.get_waiter(name)
def rds_model(name):
rds_models = core_waiter.WaiterModel(waiter_config=rds_data)
return rds_models.get_waiter(name)
waiters_by_name = {
('EC2', 'internet_gateway_exists'): lambda ec2: core_waiter.Waiter(
'internet_gateway_exists',
ec2_model('InternetGatewayExists'),
core_waiter.NormalizedOperationMethod(
ec2.describe_internet_gateways
)),
('EC2', 'route_table_exists'): lambda ec2: core_waiter.Waiter(
'route_table_exists',
ec2_model('RouteTableExists'),
core_waiter.NormalizedOperationMethod(
ec2.describe_route_tables
)),
('EC2', 'security_group_exists'): lambda ec2: core_waiter.Waiter(
'security_group_exists',
ec2_model('SecurityGroupExists'),
core_waiter.NormalizedOperationMethod(
ec2.describe_security_groups
)),
('EC2', 'subnet_exists'): lambda ec2: core_waiter.Waiter(
'subnet_exists',
ec2_model('SubnetExists'),
core_waiter.NormalizedOperationMethod(
ec2.describe_subnets
)),
('EC2', 'subnet_has_map_public'): lambda ec2: core_waiter.Waiter(
'subnet_has_map_public',
ec2_model('SubnetHasMapPublic'),
core_waiter.NormalizedOperationMethod(
ec2.describe_subnets
)),
('EC2', 'subnet_no_map_public'): lambda ec2: core_waiter.Waiter(
'subnet_no_map_public',
ec2_model('SubnetNoMapPublic'),
core_waiter.NormalizedOperationMethod(
ec2.describe_subnets
)),
('EC2', 'subnet_has_assign_ipv6'): lambda ec2: core_waiter.Waiter(
'subnet_has_assign_ipv6',
ec2_model('SubnetHasAssignIpv6'),
core_waiter.NormalizedOperationMethod(
ec2.describe_subnets
)),
('EC2', 'subnet_no_assign_ipv6'): lambda ec2: core_waiter.Waiter(
'subnet_no_assign_ipv6',
ec2_model('SubnetNoAssignIpv6'),
core_waiter.NormalizedOperationMethod(
ec2.describe_subnets
)),
('EC2', 'subnet_deleted'): lambda ec2: core_waiter.Waiter(
'subnet_deleted',
ec2_model('SubnetDeleted'),
core_waiter.NormalizedOperationMethod(
ec2.describe_subnets
)),
('EC2', 'vpn_gateway_exists'): lambda ec2: core_waiter.Waiter(
'vpn_gateway_exists',
ec2_model('VpnGatewayExists'),
core_waiter.NormalizedOperationMethod(
ec2.describe_vpn_gateways
)),
('EC2', 'vpn_gateway_detached'): lambda ec2: core_waiter.Waiter(
'vpn_gateway_detached',
ec2_model('VpnGatewayDetached'),
core_waiter.NormalizedOperationMethod(
ec2.describe_vpn_gateways
)),
('WAF', 'change_token_in_sync'): lambda waf: core_waiter.Waiter(
'change_token_in_sync',
waf_model('ChangeTokenInSync'),
core_waiter.NormalizedOperationMethod(
waf.get_change_token_status
)),
('WAFRegional', 'change_token_in_sync'): lambda waf: core_waiter.Waiter(
'change_token_in_sync',
waf_model('ChangeTokenInSync'),
core_waiter.NormalizedOperationMethod(
waf.get_change_token_status
)),
('EKS', 'cluster_active'): lambda eks: core_waiter.Waiter(
'cluster_active',
eks_model('ClusterActive'),
core_waiter.NormalizedOperationMethod(
eks.describe_cluster
)),
('EKS', 'cluster_deleted'): lambda eks: core_waiter.Waiter(
'cluster_deleted',
eks_model('ClusterDeleted'),
core_waiter.NormalizedOperationMethod(
eks.describe_cluster
)),
('RDS', 'db_instance_stopped'): lambda rds: core_waiter.Waiter(
'db_instance_stopped',
rds_model('DBInstanceStopped'),
core_waiter.NormalizedOperationMethod(
rds.describe_db_instances
)),
}
def get_waiter(client, waiter_name):
try:
return waiters_by_name[(client.__class__.__name__, waiter_name)](client)
except KeyError:
raise NotImplementedError("Waiter {0} could not be found for client {1}. Available waiters: {2}".format(
waiter_name, type(client), ', '.join(repr(k) for k in waiters_by_name.keys())))
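# Illustrative sketch (not part of the original file): fetching and using one
# of the custom waiters registered above; `ec2` is assumed to be a boto3 EC2
# client and the subnet ID is made up.
#
# get_waiter(ec2, 'subnet_exists').wait(
#     SubnetIds=['subnet-0123456789abcdef0'])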

@ -1,758 +0,0 @@
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
import sys
import traceback
from ansible.module_utils.ansible_release import __version__
from ansible.module_utils.basic import missing_required_lib, env_fallback
from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.cloud import CloudRetry
from ansible.module_utils.six import string_types, binary_type, text_type
from ansible.module_utils.common.dict_transformations import (
camel_dict_to_snake_dict, snake_dict_to_camel_dict,
_camel_to_snake, _snake_to_camel,
)
BOTO_IMP_ERR = None
try:
import boto
import boto.ec2 # boto does weird import stuff
HAS_BOTO = True
except ImportError:
BOTO_IMP_ERR = traceback.format_exc()
HAS_BOTO = False
BOTO3_IMP_ERR = None
try:
import boto3
import botocore
HAS_BOTO3 = True
except Exception:
BOTO3_IMP_ERR = traceback.format_exc()
HAS_BOTO3 = False
try:
# Although this is to allow Python 3 the ability to use the custom comparison as a key, Python 2.7 also
# uses this (and it works as expected). Python 2.6 will trigger the ImportError.
from functools import cmp_to_key
PY3_COMPARISON = True
except ImportError:
PY3_COMPARISON = False
class AnsibleAWSError(Exception):
pass
def _botocore_exception_maybe():
"""
Allow for boto3 not being installed when using these utils by wrapping
botocore.exceptions instead of assigning from it directly.
"""
if HAS_BOTO3:
return botocore.exceptions.ClientError
return type(None)
class AWSRetry(CloudRetry):
base_class = _botocore_exception_maybe()
@staticmethod
def status_code_from_exception(error):
return error.response['Error']['Code']
@staticmethod
def found(response_code, catch_extra_error_codes=None):
# This list of failures is based on this API Reference
# http://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html
#
# TooManyRequestsException comes from inside botocore when it
# does retrys, unfortunately however it does not try long
# enough to allow some services such as API Gateway to
# complete configuration. At the moment of writing there is a
# botocore/boto3 bug open to fix this.
#
# https://github.com/boto/boto3/issues/876 (and linked PRs etc)
retry_on = [
'RequestLimitExceeded', 'Unavailable', 'ServiceUnavailable',
'InternalFailure', 'InternalError', 'TooManyRequestsException',
'Throttling'
]
if catch_extra_error_codes:
retry_on.extend(catch_extra_error_codes)
return response_code in retry_on
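# Illustrative sketch (not part of the original file): AWSRetry is normally
# applied as a decorator (or inline) around a single boto3 call, retrying on
# the throttling-style codes above plus any caller-supplied extras.
#
# @AWSRetry.jittered_backoff(retries=10, catch_extra_error_codes=['InvalidInstanceID.NotFound'])
# def describe_instances(client, **kwargs):
#     return client.describe_instances(**kwargs)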
def boto3_conn(module, conn_type=None, resource=None, region=None, endpoint=None, **params):
try:
return _boto3_conn(conn_type=conn_type, resource=resource, region=region, endpoint=endpoint, **params)
except ValueError as e:
module.fail_json(msg="Couldn't connect to AWS: %s" % to_native(e))
except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError,
botocore.exceptions.NoCredentialsError, botocore.exceptions.ConfigParseError) as e:
module.fail_json(msg=to_native(e))
except botocore.exceptions.NoRegionError as e:
module.fail_json(msg="The %s module requires a region and none was found in configuration, "
"environment variables or module parameters" % module._name)
def _boto3_conn(conn_type=None, resource=None, region=None, endpoint=None, **params):
profile = params.pop('profile_name', None)
if conn_type not in ['both', 'resource', 'client']:
raise ValueError('There is an issue in the calling code. You '
'must specify either both, resource, or client to '
'the conn_type parameter in the boto3_conn function '
'call')
config = botocore.config.Config(
user_agent_extra='Ansible/{0}'.format(__version__),
)
if params.get('config') is not None:
config = config.merge(params.pop('config'))
if params.get('aws_config') is not None:
config = config.merge(params.pop('aws_config'))
session = boto3.session.Session(
profile_name=profile,
)
if conn_type == 'resource':
return session.resource(resource, config=config, region_name=region, endpoint_url=endpoint, **params)
elif conn_type == 'client':
return session.client(resource, config=config, region_name=region, endpoint_url=endpoint, **params)
else:
client = session.client(resource, region_name=region, endpoint_url=endpoint, **params)
resource = session.resource(resource, region_name=region, endpoint_url=endpoint, **params)
return client, resource
boto3_inventory_conn = _boto3_conn
def boto_exception(err):
"""
Extracts the error message from a boto exception.
:param err: Exception from boto
:return: Error message
"""
if hasattr(err, 'error_message'):
error = err.error_message
elif hasattr(err, 'message'):
error = str(err.message) + ' ' + str(err) + ' - ' + str(type(err))
else:
error = '%s: %s' % (Exception, err)
return error
def aws_common_argument_spec():
return dict(
debug_botocore_endpoint_logs=dict(fallback=(env_fallback, ['ANSIBLE_DEBUG_BOTOCORE_LOGS']), default=False, type='bool'),
ec2_url=dict(),
aws_secret_key=dict(aliases=['ec2_secret_key', 'secret_key'], no_log=True),
aws_access_key=dict(aliases=['ec2_access_key', 'access_key']),
validate_certs=dict(default=True, type='bool'),
security_token=dict(aliases=['access_token'], no_log=True),
profile=dict(),
aws_config=dict(type='dict'),
)
def ec2_argument_spec():
spec = aws_common_argument_spec()
spec.update(
dict(
region=dict(aliases=['aws_region', 'ec2_region']),
)
)
return spec
def get_aws_region(module, boto3=False):
region = module.params.get('region')
if region:
return region
if 'AWS_REGION' in os.environ:
return os.environ['AWS_REGION']
if 'AWS_DEFAULT_REGION' in os.environ:
return os.environ['AWS_DEFAULT_REGION']
if 'EC2_REGION' in os.environ:
return os.environ['EC2_REGION']
if not boto3:
if not HAS_BOTO:
module.fail_json(msg=missing_required_lib('boto'), exception=BOTO_IMP_ERR)
# boto.config.get returns None if config not found
region = boto.config.get('Boto', 'aws_region')
if region:
return region
return boto.config.get('Boto', 'ec2_region')
if not HAS_BOTO3:
module.fail_json(msg=missing_required_lib('boto3'), exception=BOTO3_IMP_ERR)
# here we don't need to make an additional call, will default to 'us-east-1' if the below evaluates to None.
try:
profile_name = module.params.get('profile')
return botocore.session.Session(profile=profile_name).get_config_variable('region')
except botocore.exceptions.ProfileNotFound as e:
return None
def get_aws_connection_info(module, boto3=False):
# Check module args for credentials, then check environment vars
# access_key
ec2_url = module.params.get('ec2_url')
access_key = module.params.get('aws_access_key')
secret_key = module.params.get('aws_secret_key')
security_token = module.params.get('security_token')
region = get_aws_region(module, boto3)
profile_name = module.params.get('profile')
validate_certs = module.params.get('validate_certs')
config = module.params.get('aws_config')
if not ec2_url:
if 'AWS_URL' in os.environ:
ec2_url = os.environ['AWS_URL']
elif 'EC2_URL' in os.environ:
ec2_url = os.environ['EC2_URL']
if not access_key:
if os.environ.get('AWS_ACCESS_KEY_ID'):
access_key = os.environ['AWS_ACCESS_KEY_ID']
elif os.environ.get('AWS_ACCESS_KEY'):
access_key = os.environ['AWS_ACCESS_KEY']
elif os.environ.get('EC2_ACCESS_KEY'):
access_key = os.environ['EC2_ACCESS_KEY']
elif HAS_BOTO and boto.config.get('Credentials', 'aws_access_key_id'):
access_key = boto.config.get('Credentials', 'aws_access_key_id')
elif HAS_BOTO and boto.config.get('default', 'aws_access_key_id'):
access_key = boto.config.get('default', 'aws_access_key_id')
else:
# in case access_key came in as empty string
access_key = None
if not secret_key:
if os.environ.get('AWS_SECRET_ACCESS_KEY'):
secret_key = os.environ['AWS_SECRET_ACCESS_KEY']
elif os.environ.get('AWS_SECRET_KEY'):
secret_key = os.environ['AWS_SECRET_KEY']
elif os.environ.get('EC2_SECRET_KEY'):
secret_key = os.environ['EC2_SECRET_KEY']
elif HAS_BOTO and boto.config.get('Credentials', 'aws_secret_access_key'):
secret_key = boto.config.get('Credentials', 'aws_secret_access_key')
elif HAS_BOTO and boto.config.get('default', 'aws_secret_access_key'):
secret_key = boto.config.get('default', 'aws_secret_access_key')
else:
# in case secret_key came in as empty string
secret_key = None
if not security_token:
if os.environ.get('AWS_SECURITY_TOKEN'):
security_token = os.environ['AWS_SECURITY_TOKEN']
elif os.environ.get('AWS_SESSION_TOKEN'):
security_token = os.environ['AWS_SESSION_TOKEN']
elif os.environ.get('EC2_SECURITY_TOKEN'):
security_token = os.environ['EC2_SECURITY_TOKEN']
elif HAS_BOTO and boto.config.get('Credentials', 'aws_security_token'):
security_token = boto.config.get('Credentials', 'aws_security_token')
elif HAS_BOTO and boto.config.get('default', 'aws_security_token'):
security_token = boto.config.get('default', 'aws_security_token')
else:
            # in case security_token came in as empty string
security_token = None
if HAS_BOTO3 and boto3:
boto_params = dict(aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
aws_session_token=security_token)
boto_params['verify'] = validate_certs
if profile_name:
boto_params = dict(aws_access_key_id=None, aws_secret_access_key=None, aws_session_token=None)
boto_params['profile_name'] = profile_name
else:
boto_params = dict(aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
security_token=security_token)
# only set profile_name if passed as an argument
if profile_name:
boto_params['profile_name'] = profile_name
boto_params['validate_certs'] = validate_certs
if config is not None:
if HAS_BOTO3 and boto3:
boto_params['aws_config'] = botocore.config.Config(**config)
elif HAS_BOTO and not boto3:
if 'user_agent' in config:
sys.modules["boto.connection"].UserAgent = config['user_agent']
for param, value in boto_params.items():
if isinstance(value, binary_type):
boto_params[param] = text_type(value, 'utf-8', 'strict')
return region, ec2_url, boto_params
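# Typical caller pattern (sketch, not part of the original file): the tuple
# returned above feeds straight into boto3_conn() to build a client.
#
# region, url, connect_params = get_aws_connection_info(module, boto3=True)
# ec2 = boto3_conn(module, conn_type='client', resource='ec2',
#                  region=region, endpoint=url, **connect_params)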
def get_ec2_creds(module):
''' for compatibility mode with old modules that don't/can't yet
use ec2_connect method '''
region, ec2_url, boto_params = get_aws_connection_info(module)
return ec2_url, boto_params['aws_access_key_id'], boto_params['aws_secret_access_key'], region
def boto_fix_security_token_in_profile(conn, profile_name):
''' monkey patch for boto issue boto/boto#2100 '''
profile = 'profile ' + profile_name
if boto.config.has_option(profile, 'aws_security_token'):
conn.provider.set_security_token(boto.config.get(profile, 'aws_security_token'))
return conn
def connect_to_aws(aws_module, region, **params):
try:
conn = aws_module.connect_to_region(region, **params)
except boto.provider.ProfileNotFoundError:
raise AnsibleAWSError("Profile given for AWS was not found. Please fix and retry.")
if not conn:
if region not in [aws_module_region.name for aws_module_region in aws_module.regions()]:
raise AnsibleAWSError("Region %s does not seem to be available for aws module %s. If the region definitely exists, you may need to upgrade "
"boto or extend with endpoints_path" % (region, aws_module.__name__))
else:
raise AnsibleAWSError("Unknown problem connecting to region %s for aws module %s." % (region, aws_module.__name__))
if params.get('profile_name'):
conn = boto_fix_security_token_in_profile(conn, params['profile_name'])
return conn
def ec2_connect(module):
""" Return an ec2 connection"""
region, ec2_url, boto_params = get_aws_connection_info(module)
# If we have a region specified, connect to its endpoint.
if region:
try:
ec2 = connect_to_aws(boto.ec2, region, **boto_params)
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError, boto.provider.ProfileNotFoundError) as e:
module.fail_json(msg=str(e))
# Otherwise, no region so we fallback to the old connection method
elif ec2_url:
try:
ec2 = boto.connect_ec2_endpoint(ec2_url, **boto_params)
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError, boto.provider.ProfileNotFoundError) as e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="Either region or ec2_url must be specified")
return ec2
def ansible_dict_to_boto3_filter_list(filters_dict):
""" Convert an Ansible dict of filters to list of dicts that boto3 can use
Args:
filters_dict (dict): Dict of AWS filters.
Basic Usage:
>>> filters = {'some-aws-id': 'i-01234567'}
>>> ansible_dict_to_boto3_filter_list(filters)
[
{
'Name': 'some-aws-id',
'Values': [
'i-01234567',
]
}
]
Returns:
List: List of AWS filters and their values
[
{
'Name': 'some-aws-id',
'Values': [
'i-01234567',
]
}
]
"""
filters_list = []
for k, v in filters_dict.items():
filter_dict = {'Name': k}
if isinstance(v, string_types):
filter_dict['Values'] = [v]
else:
filter_dict['Values'] = v
filters_list.append(filter_dict)
return filters_list
def boto3_tag_list_to_ansible_dict(tags_list, tag_name_key_name=None, tag_value_key_name=None):
""" Convert a boto3 list of resource tags to a flat dict of key:value pairs
Args:
tags_list (list): List of dicts representing AWS tags.
tag_name_key_name (str): Value to use as the key for all tag keys (useful because boto3 doesn't always use "Key")
tag_value_key_name (str): Value to use as the key for all tag values (useful because boto3 doesn't always use "Value")
Basic Usage:
>>> tags_list = [{'Key': 'MyTagKey', 'Value': 'MyTagValue'}]
>>> boto3_tag_list_to_ansible_dict(tags_list)
{
'MyTagKey': 'MyTagValue'
}
Returns:
Dict: Dict of key:value pairs representing AWS tags
{
'MyTagKey': 'MyTagValue',
}
"""
if tag_name_key_name and tag_value_key_name:
tag_candidates = {tag_name_key_name: tag_value_key_name}
else:
tag_candidates = {'key': 'value', 'Key': 'Value'}
if not tags_list:
return {}
for k, v in tag_candidates.items():
if k in tags_list[0] and v in tags_list[0]:
return dict((tag[k], tag[v]) for tag in tags_list)
raise ValueError("Couldn't find tag key (candidates %s) in tag list %s" % (str(tag_candidates), str(tags_list)))
def ansible_dict_to_boto3_tag_list(tags_dict, tag_name_key_name='Key', tag_value_key_name='Value'):
""" Convert a flat dict of key:value pairs representing AWS resource tags to a boto3 list of dicts
Args:
tags_dict (dict): Dict representing AWS resource tags.
tag_name_key_name (str): Value to use as the key for all tag keys (useful because boto3 doesn't always use "Key")
tag_value_key_name (str): Value to use as the key for all tag values (useful because boto3 doesn't always use "Value")
Basic Usage:
>>> tags_dict = {'MyTagKey': 'MyTagValue'}
>>> ansible_dict_to_boto3_tag_list(tags_dict)
[
{
'Key': 'MyTagKey',
'Value': 'MyTagValue'
}
]
Returns:
List: List of dicts containing tag keys and values
[
{
'Key': 'MyTagKey',
'Value': 'MyTagValue'
}
]
"""
tags_list = []
for k, v in tags_dict.items():
tags_list.append({tag_name_key_name: k, tag_value_key_name: to_native(v)})
return tags_list
def get_ec2_security_group_ids_from_names(sec_group_list, ec2_connection, vpc_id=None, boto3=True):
""" Return list of security group IDs from security group names. Note that security group names are not unique
across VPCs. If a name exists across multiple VPCs and no VPC ID is supplied, all matching IDs will be returned. This
will probably lead to a boto exception if you attempt to assign both IDs to a resource so ensure you wrap the call in
a try block
"""
def get_sg_name(sg, boto3):
if boto3:
return sg['GroupName']
else:
return sg.name
def get_sg_id(sg, boto3):
if boto3:
return sg['GroupId']
else:
return sg.id
sec_group_id_list = []
if isinstance(sec_group_list, string_types):
sec_group_list = [sec_group_list]
# Get all security groups
if boto3:
if vpc_id:
filters = [
{
'Name': 'vpc-id',
'Values': [
vpc_id,
]
}
]
all_sec_groups = ec2_connection.describe_security_groups(Filters=filters)['SecurityGroups']
else:
all_sec_groups = ec2_connection.describe_security_groups()['SecurityGroups']
else:
if vpc_id:
filters = {'vpc-id': vpc_id}
all_sec_groups = ec2_connection.get_all_security_groups(filters=filters)
else:
all_sec_groups = ec2_connection.get_all_security_groups()
unmatched = set(sec_group_list).difference(str(get_sg_name(all_sg, boto3)) for all_sg in all_sec_groups)
sec_group_name_list = list(set(sec_group_list) - set(unmatched))
if len(unmatched) > 0:
# If we have unmatched names that look like an ID, assume they are
import re
sec_group_id_list[:] = [sg for sg in unmatched if re.match('sg-[a-fA-F0-9]+$', sg)]
still_unmatched = [sg for sg in unmatched if not re.match('sg-[a-fA-F0-9]+$', sg)]
if len(still_unmatched) > 0:
raise ValueError("The following group names are not valid: %s" % ', '.join(still_unmatched))
sec_group_id_list += [str(get_sg_id(all_sg, boto3)) for all_sg in all_sec_groups if str(get_sg_name(all_sg, boto3)) in sec_group_name_list]
return sec_group_id_list
def _hashable_policy(policy, policy_list):
"""
Takes a policy and returns a list, the contents of which are all hashable and sorted.
Example input policy:
{'Version': '2012-10-17',
'Statement': [{'Action': 's3:PutObjectAcl',
'Sid': 'AddCannedAcl2',
'Resource': 'arn:aws:s3:::test_policy/*',
'Effect': 'Allow',
'Principal': {'AWS': ['arn:aws:iam::XXXXXXXXXXXX:user/username1', 'arn:aws:iam::XXXXXXXXXXXX:user/username2']}
}]}
Returned value:
[('Statement', ((('Action', (u's3:PutObjectAcl',)),
('Effect', (u'Allow',)),
('Principal', ('AWS', ((u'arn:aws:iam::XXXXXXXXXXXX:user/username1',), (u'arn:aws:iam::XXXXXXXXXXXX:user/username2',)))),
('Resource', (u'arn:aws:s3:::test_policy/*',)), ('Sid', (u'AddCannedAcl2',)))),
('Version', (u'2012-10-17',)))]
"""
# Amazon will automatically convert bool and int to strings for us
if isinstance(policy, bool):
return tuple([str(policy).lower()])
elif isinstance(policy, int):
return tuple([str(policy)])
if isinstance(policy, list):
for each in policy:
tupleified = _hashable_policy(each, [])
if isinstance(tupleified, list):
tupleified = tuple(tupleified)
policy_list.append(tupleified)
elif isinstance(policy, string_types) or isinstance(policy, binary_type):
policy = to_text(policy)
# convert root account ARNs to just account IDs
if policy.startswith('arn:aws:iam::') and policy.endswith(':root'):
policy = policy.split(':')[4]
return [policy]
elif isinstance(policy, dict):
sorted_keys = list(policy.keys())
sorted_keys.sort()
for key in sorted_keys:
tupleified = _hashable_policy(policy[key], [])
if isinstance(tupleified, list):
tupleified = tuple(tupleified)
policy_list.append((key, tupleified))
# ensure we aren't returning deeply nested structures of length 1
if len(policy_list) == 1 and isinstance(policy_list[0], tuple):
policy_list = policy_list[0]
if isinstance(policy_list, list):
if PY3_COMPARISON:
policy_list.sort(key=cmp_to_key(py3cmp))
else:
policy_list.sort()
return policy_list
def py3cmp(a, b):
""" Python 2 can sort lists of mixed types. Strings < tuples. Without this function this fails on Python 3."""
try:
if a > b:
return 1
elif a < b:
return -1
else:
return 0
except TypeError as e:
# check to see if they're tuple-string
# always say strings are less than tuples (to maintain compatibility with python2)
str_ind = to_text(e).find('str')
tup_ind = to_text(e).find('tuple')
if -1 not in (str_ind, tup_ind):
if str_ind < tup_ind:
return -1
elif tup_ind < str_ind:
return 1
raise
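# Minimal illustration (values assumed): sorting a mixed list of strings and
# tuples raises TypeError on Python 3; cmp_to_key(py3cmp) restores the Python 2
# ordering in which any string sorts before any tuple.
#   >>> sorted(['b', ('a',), 'a'], key=cmp_to_key(py3cmp))
#   ['a', 'b', ('a',)]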
def compare_policies(current_policy, new_policy):
""" Compares the existing policy and the updated policy
Returns True if there is a difference between policies.
"""
return set(_hashable_policy(new_policy, [])) != set(_hashable_policy(current_policy, []))
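# A brief sketch (policy bodies assumed): documents that differ only in key order
# hash to the same canonical form, so compare_policies reports no difference.
#   >>> p1 = {'Version': '2012-10-17', 'Statement': []}
#   >>> p2 = {'Statement': [], 'Version': '2012-10-17'}
#   >>> compare_policies(p1, p2)
#   False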
def sort_json_policy_dict(policy_dict):
""" Sort any lists in an IAM JSON policy so that comparison of two policies with identical values but
different orders will return true
Args:
policy_dict (dict): Dict representing IAM JSON policy.
Basic Usage:
>>> my_iam_policy = {'Principle': {'AWS':["31","7","14","101"]}
>>> sort_json_policy_dict(my_iam_policy)
Returns:
Dict: Will return a copy of the policy as a Dict but any List will be sorted
{
'Principle': {
'AWS': [ '7', '14', '31', '101' ]
}
}
"""
def value_is_list(my_list):
checked_list = []
for item in my_list:
if isinstance(item, dict):
checked_list.append(sort_json_policy_dict(item))
elif isinstance(item, list):
checked_list.append(value_is_list(item))
else:
checked_list.append(item)
# Sort list. If it's a list of dictionaries, sort by tuple of key-value
# pairs, since Python 3 doesn't allow comparisons such as `<` between dictionaries.
checked_list.sort(key=lambda x: sorted(x.items()) if isinstance(x, dict) else x)
return checked_list
ordered_policy_dict = {}
for key, value in policy_dict.items():
if isinstance(value, dict):
ordered_policy_dict[key] = sort_json_policy_dict(value)
elif isinstance(value, list):
ordered_policy_dict[key] = value_is_list(value)
else:
ordered_policy_dict[key] = value
return ordered_policy_dict
def map_complex_type(complex_type, type_map):
"""
Casts elements within a dictionary to a specific type
Example of usage:
DEPLOYMENT_CONFIGURATION_TYPE_MAP = {
'maximum_percent': 'int',
'minimum_healthy_percent': 'int'
}
deployment_configuration = map_complex_type(module.params['deployment_configuration'],
DEPLOYMENT_CONFIGURATION_TYPE_MAP)
This ensures all mapped keys within the root element are cast to valid integers
"""
if complex_type is None:
return
new_type = type(complex_type)()
if isinstance(complex_type, dict):
for key in complex_type:
if key in type_map:
if isinstance(type_map[key], list):
new_type[key] = map_complex_type(
complex_type[key],
type_map[key][0])
else:
new_type[key] = map_complex_type(
complex_type[key],
type_map[key])
else:
return complex_type
elif isinstance(complex_type, list):
for i in range(len(complex_type)):
new_type.append(map_complex_type(
complex_type[i],
type_map))
elif type_map:
return globals()['__builtins__'][type_map](complex_type)
return new_type
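# Worked example (parameter values assumed): module params arrive as strings and
# are cast according to the type map; note that if any key is missing from the
# map, the input is returned unchanged.
#   >>> map_complex_type({'maximum_percent': '200', 'minimum_healthy_percent': '50'},
#   ...                  {'maximum_percent': 'int', 'minimum_healthy_percent': 'int'})
#   {'maximum_percent': 200, 'minimum_healthy_percent': 50}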
def compare_aws_tags(current_tags_dict, new_tags_dict, purge_tags=True):
"""
Compare two dicts of AWS tags. Dicts are expected to have been created using the 'boto3_tag_list_to_ansible_dict' helper function.
Two dicts are returned - the first is tags to be set, the second is any tags to remove. Since the AWS APIs differ,
these may not be usable out of the box with every service.
:param current_tags_dict:
:param new_tags_dict:
:param purge_tags:
:return: tag_key_value_pairs_to_set: a dict of key value pairs that need to be set in AWS. If all tags are identical this dict will be empty
:return: tag_keys_to_unset: a list of key names (type str) that need to be unset in AWS. If no tags need to be unset this list will be empty
"""
tag_key_value_pairs_to_set = {}
tag_keys_to_unset = []
for key in current_tags_dict.keys():
if key not in new_tags_dict and purge_tags:
tag_keys_to_unset.append(key)
for key in set(new_tags_dict.keys()) - set(tag_keys_to_unset):
if to_text(new_tags_dict[key]) != current_tags_dict.get(key):
tag_key_value_pairs_to_set[key] = new_tags_dict[key]
return tag_key_value_pairs_to_set, tag_keys_to_unset
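# Brief sketch (tag values assumed): with purge enabled, keys absent from the
# desired set are queued for removal and new or changed values are queued for setting.
#   >>> current = {'Name': 'web01', 'Env': 'test'}
#   >>> desired = {'Name': 'web01', 'Owner': 'ops'}
#   >>> compare_aws_tags(current, desired, purge_tags=True)
#   ({'Owner': 'ops'}, ['Env'])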

@ -1,110 +0,0 @@
#!/usr/bin/python
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'supported_by': 'community',
'status': ['preview']
}
DOCUMENTATION = '''
module: aws_az_info
short_description: Gather information about availability zones in AWS.
description:
- Gather information about availability zones in AWS.
- This module was called C(aws_az_facts) before Ansible 2.9. The usage did not change.
version_added: '2.5'
author: 'Henrique Rodrigues (@Sodki)'
options:
filters:
description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value. See
U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeAvailabilityZones.html) for
possible filters. Filter names and values are case sensitive. You can also use underscores
instead of dashes (-) in the filter keys, which will take precedence in case of conflict.
required: false
default: {}
type: dict
extends_documentation_fragment:
- aws
- ec2
requirements: [botocore, boto3]
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Gather information about all availability zones
- aws_az_info:
# Gather information about a single availability zone
- aws_az_info:
filters:
zone-name: eu-west-1a
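# Gather information about a single availability zone using an underscore in the
# filter key; per the option docs this is equivalent to the dashed form above
- aws_az_info:
    filters:
      zone_name: eu-west-1a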
'''
RETURN = '''
availability_zones:
returned: on success
description: >
Availability zones that match the provided filters. Each element consists of a dict with all the information
related to that available zone.
type: list
sample: "[
{
'messages': [],
'region_name': 'us-west-1',
'state': 'available',
'zone_name': 'us-west-1b'
},
{
'messages': [],
'region_name': 'us-west-1',
'state': 'available',
'zone_name': 'us-west-1c'
}
]"
'''
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import AWSRetry, ansible_dict_to_boto3_filter_list, camel_dict_to_snake_dict
try:
from botocore.exceptions import ClientError, BotoCoreError
except ImportError:
pass # Handled by AnsibleAWSModule
def main():
argument_spec = dict(
filters=dict(default={}, type='dict')
)
module = AnsibleAWSModule(argument_spec=argument_spec)
if module._name == 'aws_az_facts':
module.deprecate("The 'aws_az_facts' module has been renamed to 'aws_az_info'", version='2.14')
connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
# Replace filter key underscores with dashes, for compatibility
sanitized_filters = dict((k.replace('_', '-'), v) for k, v in module.params.get('filters').items())
try:
availability_zones = connection.describe_availability_zones(
Filters=ansible_dict_to_boto3_filter_list(sanitized_filters)
)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Unable to describe availability zones.")
# Turn the boto3 result into ansible_friendly_snaked_names
snaked_availability_zones = [camel_dict_to_snake_dict(az) for az in availability_zones['AvailabilityZones']]
module.exit_json(availability_zones=snaked_availability_zones)
if __name__ == '__main__':
main()

@ -1,114 +0,0 @@
#!/usr/bin/python
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: aws_caller_info
short_description: Get information about the user and account being used to make AWS calls.
description:
- This module returns information about the account and user / role from which the AWS access tokens originate.
- The primary use of this is to get the account id for templating into ARNs or similar to avoid needing to specify this information in inventory.
- This module was called C(aws_caller_facts) before Ansible 2.9. The usage did not change.
version_added: "2.6"
author:
- Ed Costello (@orthanc)
- Stijn Dubrul (@sdubrul)
requirements: [ 'botocore', 'boto3' ]
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: Get the current caller identity information
aws_caller_info:
register: caller_info
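# The registered result can then be used to template the account id into ARNs,
# the primary use case noted above (the role name here is illustrative)
- name: Build an ARN from the caller's account id
  debug:
    msg: "arn:aws:iam::{{ caller_info.account }}:role/my-example-role"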
'''
RETURN = '''
account:
description: The account id the access credentials are associated with.
returned: success
type: str
sample: "123456789012"
account_alias:
description: The account alias the access credentials are associated with.
returned: when caller has the iam:ListAccountAliases permission
type: str
sample: "acme-production"
arn:
description: The arn identifying the user the credentials are associated with.
returned: success
type: str
sample: arn:aws:sts::123456789012:federated-user/my-federated-user-name
user_id:
description: |
The user id the access credentials are associated with. Note that this may not correspond to
anything you can look up in the case of roles or federated identities.
returned: success
type: str
sample: 123456789012:my-federated-user-name
'''
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import camel_dict_to_snake_dict
try:
from botocore.exceptions import BotoCoreError, ClientError
except ImportError:
pass # caught by AnsibleAWSModule
def main():
module = AnsibleAWSModule(
argument_spec={},
supports_check_mode=True,
)
if module._name == 'aws_caller_facts':
module.deprecate("The 'aws_caller_facts' module has been renamed to 'aws_caller_info'", version='2.13')
client = module.client('sts')
try:
caller_info = client.get_caller_identity()
caller_info.pop('ResponseMetadata', None)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg='Failed to retrieve caller identity')
iam_client = module.client('iam')
try:
# Although a list is returned by list_account_aliases AWS supports maximum one alias per account.
# If an alias is defined it will be returned otherwise a blank string is filled in as account_alias.
# see https://docs.aws.amazon.com/cli/latest/reference/iam/list-account-aliases.html#output
response = iam_client.list_account_aliases()
if response and response['AccountAliases']:
caller_info['account_alias'] = response['AccountAliases'][0]
else:
caller_info['account_alias'] = ''
except (BotoCoreError, ClientError) as e:
# The iam:ListAccountAliases permission is required for this operation to succeed.
# Lacking this permission is handled gracefully by not returning the account_alias.
pass
module.exit_json(
changed=False,
**camel_dict_to_snake_dict(caller_info))
if __name__ == '__main__':
main()

@ -1,925 +0,0 @@
#!/usr/bin/python
# This file is part of Ansible
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: aws_s3
short_description: manage objects in S3.
description:
- This module allows the user to manage S3 buckets and the objects within them. Includes support for creating and
deleting both objects and buckets, retrieving objects as files or strings and generating download links.
This module has a dependency on boto3 and botocore.
notes:
- In 2.4, this module has been renamed from C(s3) into M(aws_s3).
version_added: "1.1"
options:
bucket:
description:
- Bucket name.
required: true
type: str
dest:
description:
- The destination file path when downloading an object/key with a GET operation.
version_added: "1.3"
type: path
encrypt:
description:
- When set for PUT mode, asks for server-side encryption.
default: true
version_added: "2.0"
type: bool
encryption_mode:
description:
- What encryption mode to use if I(encrypt=true).
default: AES256
choices:
- AES256
- aws:kms
version_added: "2.7"
type: str
expiry:
description:
- Time limit (in seconds) for the URL generated and returned by S3/Walrus when performing a I(mode=put) or I(mode=geturl) operation.
default: 600
aliases: ['expiration']
type: int
headers:
description:
- Custom headers for PUT operation, as a dictionary of 'key=value' and 'key=value,key=value'.
version_added: "2.0"
type: dict
marker:
description:
- Specifies the key to start with when using list mode. Object keys are returned in alphabetical order, starting with the key after the marker, in order.
version_added: "2.0"
type: str
max_keys:
description:
- Max number of results to return in list mode, set this if you want to retrieve fewer than the default 1000 keys.
default: 1000
version_added: "2.0"
type: int
metadata:
description:
- Metadata for PUT operation, as a dictionary of 'key=value' and 'key=value,key=value'.
version_added: "1.6"
type: dict
mode:
description:
- Switches the module behaviour between put (upload), get (download), geturl (return download url, Ansible 1.3+),
getstr (download object as string (1.3+)), list (list keys, Ansible 2.0+), create (bucket), delete (bucket),
and delobj (delete object, Ansible 2.0+).
required: true
choices: ['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list']
type: str
object:
description:
- Keyname of the object inside the bucket. Can be used to create "virtual directories", see examples.
type: str
permission:
description:
- This option lets the user set the canned permissions on the object/bucket that are created.
The permissions that can be set are C(private), C(public-read), C(public-read-write), C(authenticated-read) for a bucket or
C(private), C(public-read), C(public-read-write), C(aws-exec-read), C(authenticated-read), C(bucket-owner-read),
C(bucket-owner-full-control) for an object. Multiple permissions can be specified as a list.
default: ['private']
version_added: "2.0"
type: list
elements: str
prefix:
description:
- Limits the response to keys that begin with the specified prefix for list mode.
default: ""
version_added: "2.0"
type: str
version:
description:
- Version ID of the object inside the bucket. Can be used to get a specific version of a file if versioning is enabled in the target bucket.
version_added: "2.0"
type: str
overwrite:
description:
- Force overwrite either locally on the filesystem or remotely with the object/key. Used with PUT and GET operations.
Boolean or one of [always, never, different], true is equal to 'always' and false is equal to 'never', new in 2.0.
When this is set to 'different', the md5 sum of the local file is compared with the 'ETag' of the object/key in S3.
The ETag may or may not be an MD5 digest of the object data. See the ETag response header here
U(https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonResponseHeaders.html)
default: 'always'
aliases: ['force']
version_added: "1.2"
type: str
retries:
description:
- On recoverable failure, how many times to retry before actually failing.
default: 0
version_added: "2.0"
type: int
aliases: ['retry']
s3_url:
description:
- S3 URL endpoint for use with Ceph, Eucalyptus, fakes3, etc. Otherwise assumes AWS.
aliases: [ S3_URL ]
type: str
dualstack:
description:
- Enables Amazon S3 Dual-Stack Endpoints, allowing S3 communications using both IPv4 and IPv6.
- Requires at least botocore version 1.4.45.
type: bool
default: false
version_added: "2.7"
rgw:
description:
- Enable Ceph RGW S3 support. This option requires an explicit url via I(s3_url).
default: false
version_added: "2.2"
type: bool
src:
description:
- The source file path when performing a PUT operation.
version_added: "1.3"
type: str
ignore_nonexistent_bucket:
description:
- "Overrides initial bucket lookups in case bucket or iam policies are restrictive. Example: a user may have the
GetObject permission but no other permissions. In this case using the option mode: get will fail without specifying
I(ignore_nonexistent_bucket=true)."
version_added: "2.3"
type: bool
encryption_kms_key_id:
description:
- KMS key id to use when encrypting objects using I(encryption_mode=aws:kms). Ignored if I(encryption_mode) is not C(aws:kms).
version_added: "2.7"
type: str
requirements: [ "boto3", "botocore" ]
author:
- "Lester Wade (@lwade)"
- "Sloane Hertel (@s-hertel)"
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
- name: Simple PUT operation
aws_s3:
bucket: mybucket
object: /my/desired/key.txt
src: /usr/local/myfile.txt
mode: put
- name: Simple PUT operation in Ceph RGW S3
aws_s3:
bucket: mybucket
object: /my/desired/key.txt
src: /usr/local/myfile.txt
mode: put
rgw: true
s3_url: "http://localhost:8000"
- name: Simple GET operation
aws_s3:
bucket: mybucket
object: /my/desired/key.txt
dest: /usr/local/myfile.txt
mode: get
- name: Get a specific version of an object.
aws_s3:
bucket: mybucket
object: /my/desired/key.txt
version: 48c9ee5131af7a716edc22df9772aa6f
dest: /usr/local/myfile.txt
mode: get
- name: PUT/upload with metadata
aws_s3:
bucket: mybucket
object: /my/desired/key.txt
src: /usr/local/myfile.txt
mode: put
metadata: 'Content-Encoding=gzip,Cache-Control=no-cache'
- name: PUT/upload with custom headers
aws_s3:
bucket: mybucket
object: /my/desired/key.txt
src: /usr/local/myfile.txt
mode: put
headers: 'x-amz-grant-full-control=emailAddress=owner@example.com'
- name: List keys simple
aws_s3:
bucket: mybucket
mode: list
- name: List keys all options
aws_s3:
bucket: mybucket
mode: list
prefix: /my/desired/
marker: /my/desired/0023.txt
max_keys: 472
- name: Create an empty bucket
aws_s3:
bucket: mybucket
mode: create
permission: public-read
- name: Create a bucket with key as directory, in the EU region
aws_s3:
bucket: mybucket
object: /my/directory/path
mode: create
region: eu-west-1
- name: Delete a bucket and all contents
aws_s3:
bucket: mybucket
mode: delete
- name: GET an object but don't download if the file checksums match. New in 2.0
aws_s3:
bucket: mybucket
object: /my/desired/key.txt
dest: /usr/local/myfile.txt
mode: get
overwrite: different
- name: Delete an object from a bucket
aws_s3:
bucket: mybucket
object: /my/desired/key.txt
mode: delobj
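# The remaining documented modes follow the same pattern (bucket and object names
# here are illustrative)
- name: Generate a time-limited download URL for an object
  aws_s3:
    bucket: mybucket
    object: /my/desired/key.txt
    expiry: 600
    mode: geturl
- name: Fetch the contents of an object into a variable
  aws_s3:
    bucket: mybucket
    object: /my/desired/key.txt
    mode: getstr
  register: object_contents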
'''
RETURN = '''
msg:
description: Message indicating the status of the operation.
returned: always
type: str
sample: PUT operation complete
url:
description: URL of the object.
returned: (for put and geturl operations)
type: str
sample: https://my-bucket.s3.amazonaws.com/my-key.txt?AWSAccessKeyId=<access-key>&Expires=1506888865&Signature=<signature>
expiry:
description: Number of seconds the presigned url is valid for.
returned: (for geturl operation)
type: int
sample: 600
contents:
description: Contents of the object as string.
returned: (for getstr operation)
type: str
sample: "Hello, world!"
s3_keys:
description: List of object keys.
returned: (for list operation)
type: list
elements: str
sample:
- prefix1/
- prefix1/key1
- prefix1/key2
'''
import mimetypes
import os
from ansible.module_utils.six.moves.urllib.parse import urlparse
from ssl import SSLError
from ansible.module_utils.basic import to_text, to_native
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.aws.s3 import calculate_etag, HAS_MD5
from ansible.module_utils.ec2 import get_aws_connection_info, boto3_conn
try:
import botocore
except ImportError:
pass # will be detected by imported AnsibleAWSModule
IGNORE_S3_DROP_IN_EXCEPTIONS = ['XNotImplemented', 'NotImplemented']
class Sigv4Required(Exception):
pass
def key_check(module, s3, bucket, obj, version=None, validate=True):
exists = True
try:
if version:
s3.head_object(Bucket=bucket, Key=obj, VersionId=version)
else:
s3.head_object(Bucket=bucket, Key=obj)
except botocore.exceptions.ClientError as e:
# if a client error is thrown, check if it's a 404 error
# if it's a 404 error, then the object does not exist
error_code = int(e.response['Error']['Code'])
if error_code == 404:
exists = False
elif error_code == 403 and validate is False:
pass
else:
module.fail_json_aws(e, msg="Failed while looking up object (during key check) %s." % obj)
except botocore.exceptions.BotoCoreError as e:
module.fail_json_aws(e, msg="Failed while looking up object (during key check) %s." % obj)
return exists
def etag_compare(module, local_file, s3, bucket, obj, version=None):
s3_etag = get_etag(s3, bucket, obj, version=version)
local_etag = calculate_etag(module, local_file, s3_etag, s3, bucket, obj, version)
return s3_etag == local_etag
def get_etag(s3, bucket, obj, version=None):
if version:
key_check = s3.head_object(Bucket=bucket, Key=obj, VersionId=version)
else:
key_check = s3.head_object(Bucket=bucket, Key=obj)
if not key_check:
return None
return key_check['ETag']
def bucket_check(module, s3, bucket, validate=True):
exists = True
try:
s3.head_bucket(Bucket=bucket)
except botocore.exceptions.ClientError as e:
# If a client error is thrown, then check that it was a 404 error.
# If it was a 404 error, then the bucket does not exist.
error_code = int(e.response['Error']['Code'])
if error_code == 404:
exists = False
elif error_code == 403 and validate is False:
pass
else:
module.fail_json_aws(e, msg="Failed while looking up bucket (during bucket_check) %s." % bucket)
except botocore.exceptions.EndpointConnectionError as e:
module.fail_json_aws(e, msg="Invalid endpoint provided")
except botocore.exceptions.BotoCoreError as e:
module.fail_json_aws(e, msg="Failed while looking up bucket (during bucket_check) %s." % bucket)
return exists
def create_bucket(module, s3, bucket, location=None):
if module.check_mode:
module.exit_json(msg="CREATE operation skipped - running in check mode", changed=True)
configuration = {}
if location not in ('us-east-1', None):
configuration['LocationConstraint'] = location
try:
if len(configuration) > 0:
s3.create_bucket(Bucket=bucket, CreateBucketConfiguration=configuration)
else:
s3.create_bucket(Bucket=bucket)
if module.params.get('permission'):
# Wait for the bucket to exist before setting ACLs
s3.get_waiter('bucket_exists').wait(Bucket=bucket)
for acl in module.params.get('permission'):
s3.put_bucket_acl(ACL=acl, Bucket=bucket)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] in IGNORE_S3_DROP_IN_EXCEPTIONS:
module.warn("PutBucketAcl is not implemented by your storage provider. Set the permission parameters to the empty list to avoid this warning")
else:
module.fail_json_aws(e, msg="Failed while creating bucket or setting acl (check that you have CreateBucket and PutBucketAcl permission).")
except botocore.exceptions.BotoCoreError as e:
module.fail_json_aws(e, msg="Failed while creating bucket or setting acl (check that you have CreateBucket and PutBucketAcl permission).")
if bucket:
return True
def paginated_list(s3, **pagination_params):
pg = s3.get_paginator('list_objects_v2')
for page in pg.paginate(**pagination_params):
yield [data['Key'] for data in page.get('Contents', [])]
def paginated_versioned_list_with_fallback(s3, **pagination_params):
try:
versioned_pg = s3.get_paginator('list_object_versions')
for page in versioned_pg.paginate(**pagination_params):
delete_markers = [{'Key': data['Key'], 'VersionId': data['VersionId']} for data in page.get('DeleteMarkers', [])]
current_objects = [{'Key': data['Key'], 'VersionId': data['VersionId']} for data in page.get('Versions', [])]
yield delete_markers + current_objects
except botocore.exceptions.ClientError as e:
if to_text(e.response['Error']['Code']) in IGNORE_S3_DROP_IN_EXCEPTIONS + ['AccessDenied']:
for page in paginated_list(s3, **pagination_params):
yield [{'Key': data['Key']} for data in page]
def list_keys(module, s3, bucket, prefix, marker, max_keys):
pagination_params = {'Bucket': bucket}
for param_name, param_value in (('Prefix', prefix), ('StartAfter', marker), ('MaxKeys', max_keys)):
pagination_params[param_name] = param_value
try:
keys = sum(paginated_list(s3, **pagination_params), [])
module.exit_json(msg="LIST operation complete", s3_keys=keys)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed while listing the keys in the bucket {0}".format(bucket))
def delete_bucket(module, s3, bucket):
if module.check_mode:
module.exit_json(msg="DELETE operation skipped - running in check mode", changed=True)
try:
exists = bucket_check(module, s3, bucket)
if exists is False:
return False
# if there are contents then we need to delete them before we can delete the bucket
for keys in paginated_versioned_list_with_fallback(s3, Bucket=bucket):
if keys:
s3.delete_objects(Bucket=bucket, Delete={'Objects': keys})
s3.delete_bucket(Bucket=bucket)
return True
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed while deleting bucket %s." % bucket)
def delete_key(module, s3, bucket, obj):
if module.check_mode:
module.exit_json(msg="DELETE operation skipped - running in check mode", changed=True)
try:
s3.delete_object(Bucket=bucket, Key=obj)
module.exit_json(msg="Object deleted from bucket %s." % (bucket), changed=True)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed while trying to delete %s." % obj)
def create_dirkey(module, s3, bucket, obj, encrypt):
if module.check_mode:
module.exit_json(msg="PUT operation skipped - running in check mode", changed=True)
try:
params = {'Bucket': bucket, 'Key': obj, 'Body': b''}
if encrypt:
params['ServerSideEncryption'] = module.params['encryption_mode']
if module.params['encryption_kms_key_id'] and module.params['encryption_mode'] == 'aws:kms':
params['SSEKMSKeyId'] = module.params['encryption_kms_key_id']
s3.put_object(**params)
for acl in module.params.get('permission'):
s3.put_object_acl(ACL=acl, Bucket=bucket, Key=obj)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] in IGNORE_S3_DROP_IN_EXCEPTIONS:
module.warn("PutObjectAcl is not implemented by your storage provider. Set the permissions parameters to the empty list to avoid this warning")
else:
module.fail_json_aws(e, msg="Failed while creating object %s." % obj)
except botocore.exceptions.BotoCoreError as e:
module.fail_json_aws(e, msg="Failed while creating object %s." % obj)
module.exit_json(msg="Virtual directory %s created in bucket %s" % (obj, bucket), changed=True)
def path_check(path):
return os.path.exists(path)
def option_in_extra_args(option):
temp_option = option.replace('-', '').lower()
allowed_extra_args = {'acl': 'ACL', 'cachecontrol': 'CacheControl', 'contentdisposition': 'ContentDisposition',
'contentencoding': 'ContentEncoding', 'contentlanguage': 'ContentLanguage',
'contenttype': 'ContentType', 'expires': 'Expires', 'grantfullcontrol': 'GrantFullControl',
'grantread': 'GrantRead', 'grantreadacp': 'GrantReadACP', 'grantwriteacp': 'GrantWriteACP',
'metadata': 'Metadata', 'requestpayer': 'RequestPayer', 'serversideencryption': 'ServerSideEncryption',
'storageclass': 'StorageClass', 'ssecustomeralgorithm': 'SSECustomerAlgorithm', 'ssecustomerkey': 'SSECustomerKey',
'ssecustomerkeymd5': 'SSECustomerKeyMD5', 'ssekmskeyid': 'SSEKMSKeyId', 'websiteredirectlocation': 'WebsiteRedirectLocation'}
if temp_option in allowed_extra_args:
return allowed_extra_args[temp_option]
def upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers):
if module.check_mode:
module.exit_json(msg="PUT operation skipped - running in check mode", changed=True)
try:
extra = {}
if encrypt:
extra['ServerSideEncryption'] = module.params['encryption_mode']
if module.params['encryption_kms_key_id'] and module.params['encryption_mode'] == 'aws:kms':
extra['SSEKMSKeyId'] = module.params['encryption_kms_key_id']
if metadata:
extra['Metadata'] = {}
# determine object metadata and extra arguments
for option in metadata:
extra_args_option = option_in_extra_args(option)
if extra_args_option is not None:
extra[extra_args_option] = metadata[option]
else:
extra['Metadata'][option] = metadata[option]
if 'ContentType' not in extra:
content_type = mimetypes.guess_type(src)[0]
if content_type is None:
# s3 default content type
content_type = 'binary/octet-stream'
extra['ContentType'] = content_type
s3.upload_file(Filename=src, Bucket=bucket, Key=obj, ExtraArgs=extra)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Unable to complete PUT operation.")
try:
for acl in module.params.get('permission'):
s3.put_object_acl(ACL=acl, Bucket=bucket, Key=obj)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] in IGNORE_S3_DROP_IN_EXCEPTIONS:
module.warn("PutObjectAcl is not implemented by your storage provider. Set the permission parameters to the empty list to avoid this warning")
else:
module.fail_json_aws(e, msg="Unable to set object ACL")
except botocore.exceptions.BotoCoreError as e:
module.fail_json_aws(e, msg="Unable to set object ACL")
try:
url = s3.generate_presigned_url(ClientMethod='put_object',
Params={'Bucket': bucket, 'Key': obj},
ExpiresIn=expiry)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Unable to generate presigned URL")
module.exit_json(msg="PUT operation complete", url=url, changed=True)
def download_s3file(module, s3, bucket, obj, dest, retries, version=None):
if module.check_mode:
module.exit_json(msg="GET operation skipped - running in check mode", changed=True)
# retries is the number of loops; range/xrange needs to be one
# more to get that count of loops.
try:
if version:
key = s3.get_object(Bucket=bucket, Key=obj, VersionId=version)
else:
key = s3.get_object(Bucket=bucket, Key=obj)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == 'InvalidArgument' and 'require AWS Signature Version 4' in to_text(e):
raise Sigv4Required()
elif e.response['Error']['Code'] not in ("403", "404"):
# AccessDenied errors may be triggered if 1) file does not exist or 2) file exists but
# user does not have the s3:GetObject permission. 404 errors are handled by download_file().
module.fail_json_aws(e, msg="Could not find the key %s." % obj)
except botocore.exceptions.BotoCoreError as e:
module.fail_json_aws(e, msg="Could not find the key %s." % obj)
optional_kwargs = {'ExtraArgs': {'VersionId': version}} if version else {}
for x in range(0, retries + 1):
try:
s3.download_file(bucket, obj, dest, **optional_kwargs)
module.exit_json(msg="GET operation complete", changed=True)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
# actually fail on last pass through the loop.
if x >= retries:
module.fail_json_aws(e, msg="Failed while downloading %s." % obj)
# otherwise, try again, this may be a transient timeout.
except SSLError as e: # will ClientError catch SSLError?
# actually fail on last pass through the loop.
if x >= retries:
module.fail_json_aws(e, msg="s3 download failed")
# otherwise, try again, this may be a transient timeout.
def download_s3str(module, s3, bucket, obj, version=None, validate=True):
if module.check_mode:
module.exit_json(msg="GET operation skipped - running in check mode", changed=True)
try:
if version:
contents = to_native(s3.get_object(Bucket=bucket, Key=obj, VersionId=version)["Body"].read())
else:
contents = to_native(s3.get_object(Bucket=bucket, Key=obj)["Body"].read())
module.exit_json(msg="GET operation complete", contents=contents, changed=True)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == 'InvalidArgument' and 'require AWS Signature Version 4' in to_text(e):
raise Sigv4Required()
else:
module.fail_json_aws(e, msg="Failed while getting contents of object %s as a string." % obj)
except botocore.exceptions.BotoCoreError as e:
module.fail_json_aws(e, msg="Failed while getting contents of object %s as a string." % obj)
def get_download_url(module, s3, bucket, obj, expiry, changed=True):
try:
url = s3.generate_presigned_url(ClientMethod='get_object',
Params={'Bucket': bucket, 'Key': obj},
ExpiresIn=expiry)
module.exit_json(msg="Download url:", url=url, expiry=expiry, changed=changed)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed while getting download url.")
def is_fakes3(s3_url):
""" Return True if s3_url has scheme fakes3:// """
if s3_url is not None:
return urlparse(s3_url).scheme in ('fakes3', 'fakes3s')
else:
return False
def get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url, sig_4=False):
if s3_url and rgw: # TODO - test this
rgw = urlparse(s3_url)
params = dict(module=module, conn_type='client', resource='s3', use_ssl=rgw.scheme == 'https', region=location, endpoint=s3_url, **aws_connect_kwargs)
elif is_fakes3(s3_url):
fakes3 = urlparse(s3_url)
port = fakes3.port
if fakes3.scheme == 'fakes3s':
protocol = "https"
if port is None:
port = 443
else:
protocol = "http"
if port is None:
port = 80
params = dict(module=module, conn_type='client', resource='s3', region=location,
endpoint="%s://%s:%s" % (protocol, fakes3.hostname, to_text(port)),
use_ssl=fakes3.scheme == 'fakes3s', **aws_connect_kwargs)
else:
params = dict(module=module, conn_type='client', resource='s3', region=location, endpoint=s3_url, **aws_connect_kwargs)
if module.params['mode'] == 'put' and module.params['encryption_mode'] == 'aws:kms':
params['config'] = botocore.client.Config(signature_version='s3v4')
elif module.params['mode'] in ('get', 'getstr') and sig_4:
params['config'] = botocore.client.Config(signature_version='s3v4')
if module.params['dualstack']:
dualconf = botocore.client.Config(s3={'use_dualstack_endpoint': True})
if 'config' in params:
params['config'] = params['config'].merge(dualconf)
else:
params['config'] = dualconf
return boto3_conn(**params)
def main():
argument_spec = dict(
bucket=dict(required=True),
dest=dict(default=None, type='path'),
encrypt=dict(default=True, type='bool'),
encryption_mode=dict(choices=['AES256', 'aws:kms'], default='AES256'),
expiry=dict(default=600, type='int', aliases=['expiration']),
headers=dict(type='dict'),
marker=dict(default=""),
max_keys=dict(default=1000, type='int'),
metadata=dict(type='dict'),
mode=dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list'], required=True),
object=dict(),
permission=dict(type='list', default=['private']),
version=dict(default=None),
overwrite=dict(aliases=['force'], default='always'),
prefix=dict(default=""),
retries=dict(aliases=['retry'], type='int', default=0),
s3_url=dict(aliases=['S3_URL']),
dualstack=dict(default='no', type='bool'),
rgw=dict(default='no', type='bool'),
src=dict(),
ignore_nonexistent_bucket=dict(default=False, type='bool'),
encryption_kms_key_id=dict()
)
module = AnsibleAWSModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[['mode', 'put', ['src', 'object']],
['mode', 'get', ['dest', 'object']],
['mode', 'getstr', ['object']],
['mode', 'geturl', ['object']]],
)
bucket = module.params.get('bucket')
encrypt = module.params.get('encrypt')
expiry = module.params.get('expiry')
dest = module.params.get('dest', '')
headers = module.params.get('headers')
marker = module.params.get('marker')
max_keys = module.params.get('max_keys')
metadata = module.params.get('metadata')
mode = module.params.get('mode')
obj = module.params.get('object')
version = module.params.get('version')
overwrite = module.params.get('overwrite')
prefix = module.params.get('prefix')
retries = module.params.get('retries')
s3_url = module.params.get('s3_url')
dualstack = module.params.get('dualstack')
rgw = module.params.get('rgw')
src = module.params.get('src')
ignore_nonexistent_bucket = module.params.get('ignore_nonexistent_bucket')
object_canned_acl = ["private", "public-read", "public-read-write", "aws-exec-read", "authenticated-read", "bucket-owner-read", "bucket-owner-full-control"]
bucket_canned_acl = ["private", "public-read", "public-read-write", "authenticated-read"]
if overwrite not in ['always', 'never', 'different']:
if module.boolean(overwrite):
overwrite = 'always'
else:
overwrite = 'never'
if overwrite == 'different' and not HAS_MD5:
module.fail_json(msg='overwrite=different is unavailable: ETag calculation requires MD5 support')
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
if region in ('us-east-1', '', None):
# default to US Standard region
location = 'us-east-1'
else:
# Boto uses symbolic names for locations but region strings will
# actually work fine for everything except us-east-1 (US Standard)
location = region
if module.params.get('object'):
obj = module.params['object']
# If the object key starts with a leading '/', strip it to maintain
# compatibility with Ansible versions < 2.4
if obj.startswith('/'):
obj = obj[1:]
# Bucket deletion does not require obj. Prevents ambiguity with delobj.
if obj and mode == "delete":
module.fail_json(msg='Parameter obj cannot be used with mode=delete')
# allow eucarc environment variables to be used if ansible vars aren't set
if not s3_url and 'S3_URL' in os.environ:
s3_url = os.environ['S3_URL']
if dualstack and s3_url is not None and 'amazonaws.com' not in s3_url:
module.fail_json(msg='dualstack only applies to AWS S3')
if dualstack and not module.botocore_at_least('1.4.45'):
module.fail_json(msg='dualstack requires botocore >= 1.4.45')
# rgw requires an explicit url
if rgw and not s3_url:
module.fail_json(msg='rgw flavour requires s3_url')
# Look at s3_url and tweak connection settings
# if connecting to RGW, Walrus or fakes3
if s3_url:
for key in ['validate_certs', 'security_token', 'profile_name']:
aws_connect_kwargs.pop(key, None)
s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url)
validate = not ignore_nonexistent_bucket
# separate types of ACLs
bucket_acl = [acl for acl in module.params.get('permission') if acl in bucket_canned_acl]
object_acl = [acl for acl in module.params.get('permission') if acl in object_canned_acl]
error_acl = [acl for acl in module.params.get('permission') if acl not in bucket_canned_acl and acl not in object_canned_acl]
if error_acl:
module.fail_json(msg='Unknown permission specified: %s' % error_acl)
# First, we check to see if the bucket exists, we get "bucket" returned.
bucketrtn = bucket_check(module, s3, bucket, validate=validate)
if validate and mode not in ('create', 'put', 'delete') and not bucketrtn:
module.fail_json(msg="Source bucket cannot be found.")
if mode == 'get':
keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
if keyrtn is False:
if version:
module.fail_json(msg="Key %s with version id %s does not exist." % (obj, version))
else:
module.fail_json(msg="Key %s does not exist." % obj)
if path_check(dest) and overwrite != 'always':
if overwrite == 'never':
module.exit_json(msg="Local object already exists and overwrite is disabled.", changed=False)
if etag_compare(module, dest, s3, bucket, obj, version=version):
module.exit_json(msg="Local and remote object are identical, ignoring. Use overwrite=always parameter to force.", changed=False)
try:
download_s3file(module, s3, bucket, obj, dest, retries, version=version)
except Sigv4Required:
s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url, sig_4=True)
download_s3file(module, s3, bucket, obj, dest, retries, version=version)
if mode == 'put':
# if putting an object in a bucket yet to be created, acls for the bucket and/or the object may be specified
# these were separated into the variables bucket_acl and object_acl above
if not path_check(src):
module.fail_json(msg="Local object for PUT does not exist")
if bucketrtn:
keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
else:
# If the bucket doesn't exist we should create it.
# only use valid bucket acls for create_bucket function
module.params['permission'] = bucket_acl
create_bucket(module, s3, bucket, location)
# a freshly created bucket cannot contain the key yet; without this the
# overwrite check below would reference keyrtn before assignment
keyrtn = False
if keyrtn and overwrite != 'always':
if overwrite == 'never' or etag_compare(module, src, s3, bucket, obj):
# Return the download URL for the existing object
get_download_url(module, s3, bucket, obj, expiry, changed=False)
# only use valid object acls for the upload_s3file function
module.params['permission'] = object_acl
upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)
# Delete an object from a bucket, not the entire bucket
if mode == 'delobj':
if obj is None:
module.fail_json(msg="object parameter is required")
if bucket:
deletertn = delete_key(module, s3, bucket, obj)
if deletertn is True:
module.exit_json(msg="Object deleted from bucket %s." % bucket, changed=True)
else:
module.fail_json(msg="Bucket parameter is required.")
# Delete an entire bucket, including all objects in the bucket
if mode == 'delete':
if bucket:
deletertn = delete_bucket(module, s3, bucket)
if deletertn is True:
module.exit_json(msg="Bucket %s and all keys have been deleted." % bucket, changed=True)
else:
module.fail_json(msg="Bucket parameter is required.")
# Support for listing a set of keys
if mode == 'list':
exists = bucket_check(module, s3, bucket)
# If the bucket does not exist then bail out
if not exists:
module.fail_json(msg="Target bucket (%s) cannot be found" % bucket)
list_keys(module, s3, bucket, prefix, marker, max_keys)
# Need to research how to create directories without "populating" a key, so this should just do bucket creation for now.
# WE SHOULD ENABLE SOME WAY OF CREATING AN EMPTY KEY TO CREATE "DIRECTORY" STRUCTURE, AWS CONSOLE DOES THIS.
if mode == 'create':
# if both creating a bucket and putting an object in it, acls for the bucket and/or the object may be specified
# these were separated above into the variables bucket_acl and object_acl
if bucket and not obj:
if bucketrtn:
module.exit_json(msg="Bucket already exists.", changed=False)
else:
# only use valid bucket acls when creating the bucket
module.params['permission'] = bucket_acl
module.exit_json(msg="Bucket created successfully", changed=create_bucket(module, s3, bucket, location))
if bucket and obj:
if obj.endswith('/'):
dirobj = obj
else:
dirobj = obj + "/"
if bucketrtn:
if key_check(module, s3, bucket, dirobj):
module.exit_json(msg="Bucket %s and key %s already exists." % (bucket, obj), changed=False)
else:
# setting valid object acls for the create_dirkey function
module.params['permission'] = object_acl
create_dirkey(module, s3, bucket, dirobj, encrypt)
else:
# only use valid bucket acls for the create_bucket function
module.params['permission'] = bucket_acl
created = create_bucket(module, s3, bucket, location)
# only use valid object acls for the create_dirkey function
module.params['permission'] = object_acl
create_dirkey(module, s3, bucket, dirobj, encrypt)
# Support for grabbing the time-expired URL for an object in S3/Walrus.
if mode == 'geturl':
if not bucket and not obj:
module.fail_json(msg="Bucket and Object parameters must be set")
keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
if keyrtn:
get_download_url(module, s3, bucket, obj, expiry)
else:
module.fail_json(msg="Key %s does not exist." % obj)
if mode == 'getstr':
if bucket and obj:
keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
if keyrtn:
try:
download_s3str(module, s3, bucket, obj, version=version)
except Sigv4Required:
s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url, sig_4=True)
download_s3str(module, s3, bucket, obj, version=version)
elif version is not None:
module.fail_json(msg="Key %s with version id %s does not exist." % (obj, version))
else:
module.fail_json(msg="Key %s does not exist." % obj)
module.exit_json(failed=False)
if __name__ == '__main__':
main()

@ -1,837 +0,0 @@
#!/usr/bin/python
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: cloudformation
short_description: Create or delete an AWS CloudFormation stack
description:
- Launches or updates an AWS CloudFormation stack and waits for it to complete.
notes:
- CloudFormation features change often, and this module tries to keep up. That means your botocore version should be fresh.
The version listed in the requirements is the oldest version that works with the module as a whole.
Some features may require recent versions, and we do not pinpoint a minimum version for each feature.
Instead of relying on the minimum version, keep botocore up to date. AWS is always releasing features and fixing bugs.
version_added: "1.1"
options:
stack_name:
description:
- Name of the CloudFormation stack.
required: true
type: str
disable_rollback:
description:
- If a stack fails to form, rollback will remove the stack.
default: false
type: bool
on_create_failure:
description:
- Action to take upon failure of stack creation. Incompatible with the I(disable_rollback) option.
choices:
- DO_NOTHING
- ROLLBACK
- DELETE
version_added: "2.8"
type: str
create_timeout:
description:
- The amount of time (in minutes) that can pass before the stack status becomes CREATE_FAILED.
version_added: "2.6"
type: int
template_parameters:
description:
- A dict of all the template variables for the stack. The value can be a string or a dict.
- Dict can be used to set additional template parameter attributes like UsePreviousValue (see example).
default: {}
type: dict
state:
description:
- If I(state=present), stack will be created.
- If I(state=present) and if stack exists and template has changed, it will be updated.
- If I(state=absent), stack will be removed.
default: present
choices: [ present, absent ]
type: str
template:
description:
- The local path of the CloudFormation template.
- This must be the full path to the file, relative to the working directory. If using roles this may look
like C(roles/cloudformation/files/cloudformation-example.json).
- If I(state=present) and the stack does not exist yet, either I(template), I(template_body) or I(template_url)
must be specified (but only one of them).
- If I(state=present), the stack does exist, and neither I(template),
I(template_body) nor I(template_url) are specified, the previous template will be reused.
type: path
notification_arns:
description:
- A comma separated list of Simple Notification Service (SNS) topic ARNs to publish stack related events.
version_added: "2.0"
type: str
stack_policy:
description:
- The path of the CloudFormation stack policy. A policy cannot be removed once placed, but it can be modified.
For instance, to allow all updates, see U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/protect-stack-resources.html#d0e9051)
version_added: "1.9"
type: str
tags:
description:
- Dictionary of tags to associate with stack and its resources during stack creation.
- Can be updated later, updating tags removes previous entries.
version_added: "1.4"
type: dict
template_url:
description:
- Location of file containing the template body. The URL must point to a template (max size 307,200 bytes) located in an
S3 bucket in the same region as the stack.
- If I(state=present) and the stack does not exist yet, either I(template), I(template_body) or I(template_url)
must be specified (but only one of them).
- If I(state=present), the stack does exist, and neither I(template), I(template_body) nor I(template_url) are specified,
the previous template will be reused.
version_added: "2.0"
type: str
create_changeset:
description:
- "If stack already exists create a changeset instead of directly applying changes. See the AWS Change Sets docs
U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-changesets.html)."
- "WARNING: if the stack does not exist, it will be created without changeset. If I(state=absent), the stack will be
deleted immediately with no changeset."
type: bool
default: false
version_added: "2.4"
changeset_name:
description:
- Name given to the changeset when creating a changeset.
- Only used when I(create_changeset=true).
- By default a name prefixed with Ansible-STACKNAME is generated based on input parameters.
See the AWS Change Sets docs for more information
U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-changesets.html)
version_added: "2.4"
type: str
template_format:
description:
- This parameter is ignored since Ansible 2.3 and will be removed in Ansible 2.14.
- Templates are now passed raw to CloudFormation regardless of format.
version_added: "2.0"
type: str
role_arn:
description:
- The role that AWS CloudFormation assumes to create the stack. See the AWS CloudFormation Service Role
docs U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-iam-servicerole.html)
version_added: "2.3"
type: str
termination_protection:
description:
- Enable or disable termination protection on the stack. Only works with botocore >= 1.7.18.
type: bool
version_added: "2.5"
template_body:
description:
- Template body. Use this to pass in the actual body of the CloudFormation template.
- If I(state=present) and the stack does not exist yet, either I(template), I(template_body) or I(template_url)
must be specified (but only one of them).
- If I(state=present), the stack does exist, and neither I(template), I(template_body) nor I(template_url)
are specified, the previous template will be reused.
version_added: "2.5"
type: str
events_limit:
description:
- Maximum number of CloudFormation events to fetch from a stack when creating or updating it.
default: 200
version_added: "2.7"
type: int
backoff_delay:
description:
- Number of seconds to wait for the next retry.
default: 3
version_added: "2.8"
type: int
required: False
backoff_max_delay:
description:
- Maximum amount of time to wait between retries.
default: 30
version_added: "2.8"
type: int
required: False
backoff_retries:
description:
- Number of times to retry operation.
- The AWS API throttling mechanism can cause CloudFormation operations to fail, so this module retries them a number of times.
default: 10
version_added: "2.8"
type: int
required: False
capabilities:
description:
- Specify capabilities that stack template contains.
- Valid values are C(CAPABILITY_IAM), C(CAPABILITY_NAMED_IAM) and C(CAPABILITY_AUTO_EXPAND).
type: list
elements: str
version_added: "2.8"
default: [ CAPABILITY_IAM, CAPABILITY_NAMED_IAM ]
author: "James S. Martin (@jsmartin)"
extends_documentation_fragment:
- aws
- ec2
requirements: [ boto3, botocore>=1.5.45 ]
'''
EXAMPLES = '''
- name: create a cloudformation stack
cloudformation:
stack_name: "ansible-cloudformation"
state: "present"
region: "us-east-1"
disable_rollback: true
template: "files/cloudformation-example.json"
template_parameters:
KeyName: "jmartin"
DiskType: "ephemeral"
InstanceType: "m1.small"
ClusterSize: 3
tags:
Stack: "ansible-cloudformation"
# Basic role example
- name: create a stack, specify role that cloudformation assumes
cloudformation:
stack_name: "ansible-cloudformation"
state: "present"
region: "us-east-1"
disable_rollback: true
template: "roles/cloudformation/files/cloudformation-example.json"
role_arn: 'arn:aws:iam::123456789012:role/cloudformation-iam-role'
- name: delete a stack
cloudformation:
stack_name: "ansible-cloudformation-old"
state: "absent"
# Create a stack, pass in template from a URL, disable rollback if stack creation fails,
# pass in some parameters to the template, provide tags for resources created
- name: create a stack, pass in the template via a URL
cloudformation:
stack_name: "ansible-cloudformation"
state: present
region: us-east-1
disable_rollback: true
template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template
template_parameters:
KeyName: jmartin
DiskType: ephemeral
InstanceType: m1.small
ClusterSize: 3
tags:
Stack: ansible-cloudformation
# Create a stack, passing in template body using lookup of Jinja2 template, disable rollback if stack creation fails,
# pass in some parameters to the template, provide tags for resources created
- name: create a stack, pass in the template body via lookup template
cloudformation:
stack_name: "ansible-cloudformation"
state: present
region: us-east-1
disable_rollback: true
template_body: "{{ lookup('template', 'cloudformation.j2') }}"
template_parameters:
KeyName: jmartin
DiskType: ephemeral
InstanceType: m1.small
ClusterSize: 3
tags:
Stack: ansible-cloudformation
# Pass a template parameter which uses CloudFormation's UsePreviousValue attribute
# When use_previous_value is set to True, the given value will be ignored and
# CloudFormation will use the value from a previously submitted template.
# If use_previous_value is set to False (default) the given value is used.
- cloudformation:
stack_name: "ansible-cloudformation"
state: "present"
region: "us-east-1"
template: "files/cloudformation-example.json"
template_parameters:
DBSnapshotIdentifier:
use_previous_value: True
value: arn:aws:rds:us-east-1:000000000000:snapshot:rds:my-db-snapshot
DBName:
use_previous_value: True
tags:
Stack: "ansible-cloudformation"
# Enable termination protection on a stack.
# If the stack already exists, this will update its termination protection
- name: enable termination protection during stack creation
cloudformation:
stack_name: my_stack
state: present
template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template
termination_protection: yes
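# Publish stack events to SNS topics (illustrative sketch; the topic ARN
# below is a placeholder, not a real resource)
- name: create a stack that publishes events to SNS
cloudformation:
stack_name: my_stack
state: present
template: "files/cloudformation-example.json"
notification_arns: "arn:aws:sns:us-east-1:123456789012:my-topic"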
# Configure TimeoutInMinutes before the stack status becomes CREATE_FAILED
# In this case, if disable_rollback is not set or is set to false, the stack will be rolled back.
- name: set a timeout for stack creation
cloudformation:
stack_name: my_stack
state: present
template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template
create_timeout: 5
# Configure rollback behaviour on the unsuccessful creation of a stack allowing
# CloudFormation to clean up, or do nothing in the event of an unsuccessful
# deployment
# In this case, if on_create_failure is set to "DELETE", it will clean up the stack if
# it fails to create
- name: create stack which will delete on creation failure
cloudformation:
stack_name: my_stack
state: present
template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template
on_create_failure: DELETE
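# Create a changeset for review instead of updating the stack directly
# (illustrative sketch; stack, changeset and template names are placeholders)
- name: create a changeset for an existing stack
cloudformation:
stack_name: my_stack
state: present
create_changeset: true
changeset_name: my-changeset
template: "files/cloudformation-example.json"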
'''
RETURN = '''
events:
type: list
description: Most recent events in CloudFormation's event log. This may be from a previous run in some cases.
returned: always
sample: ["StackEvent AWS::CloudFormation::Stack stackname UPDATE_COMPLETE", "StackEvent AWS::CloudFormation::Stack stackname UPDATE_COMPLETE_CLEANUP_IN_PROGRESS"]
log:
description: Debugging logs. Useful when modifying the module or diagnosing an error.
returned: always
type: list
sample: ["updating stack"]
change_set_id:
description: The ID of the stack change set if one was created.
returned: I(state=present) and I(create_changeset=true)
type: str
sample: "arn:aws:cloudformation:us-east-1:012345678901:changeSet/Ansible-StackName-f4496805bd1b2be824d1e315c6884247ede41eb0"
stack_resources:
description: AWS stack resources and their status. List of dictionaries, one dict per resource.
returned: state == present
type: list
sample: [
{
"last_updated_time": "2016-10-11T19:40:14.979000+00:00",
"logical_resource_id": "CFTestSg",
"physical_resource_id": "cloudformation2-CFTestSg-16UQ4CYQ57O9F",
"resource_type": "AWS::EC2::SecurityGroup",
"status": "UPDATE_COMPLETE",
"status_reason": null
}
]
stack_outputs:
type: dict
description: A key:value dictionary of all the stack outputs currently defined. If there are no stack outputs, it is an empty dictionary.
returned: state == present
sample: {"MySg": "AnsibleModuleTestYAML-CFTestSg-C8UVS567B6NS"}
''' # NOQA
import json
import time
import uuid
import traceback
from hashlib import sha1
try:
import boto3
import botocore
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
from ansible.module_utils.ec2 import ansible_dict_to_boto3_tag_list, AWSRetry, boto3_conn, boto_exception, ec2_argument_spec, get_aws_connection_info
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_bytes, to_native
def get_stack_events(cfn, stack_name, events_limit, token_filter=None):
'''Fetch stack events. The event data returned before v2.3 was never correct and only worked as a side effect, so the v2.3 format is different.'''
ret = {'events': [], 'log': []}
try:
pg = cfn.get_paginator(
'describe_stack_events'
).paginate(
StackName=stack_name,
PaginationConfig={'MaxItems': events_limit}
)
if token_filter is not None:
events = list(pg.search(
"StackEvents[?ClientRequestToken == '{0}']".format(token_filter)
))
else:
events = list(pg.search("StackEvents[*]"))
except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as err:
error_msg = boto_exception(err)
if 'does not exist' in error_msg:
# missing stack, don't bail.
ret['log'].append('Stack does not exist.')
return ret
ret['log'].append('Unknown error: ' + str(error_msg))
return ret
for e in events:
eventline = 'StackEvent {ResourceType} {LogicalResourceId} {ResourceStatus}'.format(**e)
ret['events'].append(eventline)
if e['ResourceStatus'].endswith('FAILED'):
failline = '{ResourceType} {LogicalResourceId} {ResourceStatus}: {ResourceStatusReason}'.format(**e)
ret['log'].append(failline)
return ret
def create_stack(module, stack_params, cfn, events_limit):
if 'TemplateBody' not in stack_params and 'TemplateURL' not in stack_params:
module.fail_json(msg="Either 'template', 'template_body' or 'template_url' is required when the stack does not exist.")
# 'DisableRollback', 'TimeoutInMinutes', 'EnableTerminationProtection' and
# 'OnFailure' only apply on creation, not update.
if module.params.get('on_create_failure') is not None:
stack_params['OnFailure'] = module.params['on_create_failure']
else:
stack_params['DisableRollback'] = module.params['disable_rollback']
if module.params.get('create_timeout') is not None:
stack_params['TimeoutInMinutes'] = module.params['create_timeout']
if module.params.get('termination_protection') is not None:
if boto_supports_termination_protection(cfn):
stack_params['EnableTerminationProtection'] = bool(module.params.get('termination_protection'))
else:
module.fail_json(msg="termination_protection parameter requires botocore >= 1.7.18")
try:
response = cfn.create_stack(**stack_params)
# Use stack ID to follow stack state in case of on_create_failure = DELETE
result = stack_operation(cfn, response['StackId'], 'CREATE', events_limit, stack_params.get('ClientRequestToken', None))
except Exception as err:
error_msg = boto_exception(err)
module.fail_json(msg="Failed to create stack {0}: {1}.".format(stack_params.get('StackName'), error_msg), exception=traceback.format_exc())
if not result:
module.fail_json(msg="empty result")
return result
def list_changesets(cfn, stack_name):
res = cfn.list_change_sets(StackName=stack_name)
return [cs['ChangeSetName'] for cs in res['Summaries']]
def create_changeset(module, stack_params, cfn, events_limit):
if 'TemplateBody' not in stack_params and 'TemplateURL' not in stack_params:
module.fail_json(msg="Either 'template' or 'template_url' is required.")
if module.params['changeset_name'] is not None:
stack_params['ChangeSetName'] = module.params['changeset_name']
# changesets don't accept ClientRequestToken parameters
stack_params.pop('ClientRequestToken', None)
try:
changeset_name = build_changeset_name(stack_params)
stack_params['ChangeSetName'] = changeset_name
# Determine if this changeset already exists
pending_changesets = list_changesets(cfn, stack_params['StackName'])
if changeset_name in pending_changesets:
warning = 'WARNING: %d pending changeset(s) exist(s) for this stack!' % len(pending_changesets)
result = dict(changed=False, output='ChangeSet %s already exists.' % changeset_name, warnings=[warning])
else:
cs = cfn.create_change_set(**stack_params)
# Make sure we don't enter an infinite loop
time_end = time.time() + 600
while time.time() < time_end:
try:
newcs = cfn.describe_change_set(ChangeSetName=cs['Id'])
except botocore.exceptions.BotoCoreError as err:
error_msg = boto_exception(err)
module.fail_json(msg=error_msg)
if newcs['Status'] == 'CREATE_PENDING' or newcs['Status'] == 'CREATE_IN_PROGRESS':
time.sleep(1)
elif newcs['Status'] == 'FAILED' and "The submitted information didn't contain changes" in newcs['StatusReason']:
cfn.delete_change_set(ChangeSetName=cs['Id'])
result = dict(changed=False,
output='The created Change Set did not contain any changes to this stack and was deleted.')
# a failed change set does not trigger any stack events, so skip any
# further processing of the result and return it directly
return result
else:
break
# Let's not hog the CPU or spam the AWS API
time.sleep(1)
result = stack_operation(cfn, stack_params['StackName'], 'CREATE_CHANGESET', events_limit)
result['change_set_id'] = cs['Id']
result['warnings'] = ['Created changeset named %s for stack %s' % (changeset_name, stack_params['StackName']),
'You can execute it using: aws cloudformation execute-change-set --change-set-name %s' % cs['Id'],
'NOTE that dependencies on this stack might fail due to pending changes!']
except Exception as err:
error_msg = boto_exception(err)
if 'No updates are to be performed.' in error_msg:
result = dict(changed=False, output='Stack is already up-to-date.')
else:
module.fail_json(msg="Failed to create change set: {0}".format(error_msg), exception=traceback.format_exc())
if not result:
module.fail_json(msg="empty result")
return result
def update_stack(module, stack_params, cfn, events_limit):
if 'TemplateBody' not in stack_params and 'TemplateURL' not in stack_params:
stack_params['UsePreviousTemplate'] = True
# if the state is present and the stack already exists, we try to update it.
# AWS will tell us if the stack template and parameters are the same and
# don't need to be updated.
try:
cfn.update_stack(**stack_params)
result = stack_operation(cfn, stack_params['StackName'], 'UPDATE', events_limit, stack_params.get('ClientRequestToken', None))
except Exception as err:
error_msg = boto_exception(err)
if 'No updates are to be performed.' in error_msg:
result = dict(changed=False, output='Stack is already up-to-date.')
else:
module.fail_json(msg="Failed to update stack {0}: {1}".format(stack_params.get('StackName'), error_msg), exception=traceback.format_exc())
if not result:
module.fail_json(msg="empty result")
return result
def update_termination_protection(module, cfn, stack_name, desired_termination_protection_state):
'''updates termination protection of a stack'''
if not boto_supports_termination_protection(cfn):
module.fail_json(msg="termination_protection parameter requires botocore >= 1.7.18")
stack = get_stack_facts(cfn, stack_name)
if stack:
if stack['EnableTerminationProtection'] is not desired_termination_protection_state:
try:
cfn.update_termination_protection(
EnableTerminationProtection=desired_termination_protection_state,
StackName=stack_name)
except botocore.exceptions.ClientError as e:
module.fail_json(msg=boto_exception(e), exception=traceback.format_exc())
def boto_supports_termination_protection(cfn):
'''termination protection was added in botocore 1.7.18'''
return hasattr(cfn, "update_termination_protection")
def stack_operation(cfn, stack_name, operation, events_limit, op_token=None):
'''gets the status of a stack while it is created/updated/deleted'''
existed = []
while True:
try:
stack = get_stack_facts(cfn, stack_name)
existed.append('yes')
except Exception:
# If the stack previously existed, and now can't be found then it's
# been deleted successfully.
if 'yes' in existed or operation == 'DELETE': # stacks may delete fast, look in a few ways.
ret = get_stack_events(cfn, stack_name, events_limit, op_token)
ret.update({'changed': True, 'output': 'Stack Deleted'})
return ret
else:
return {'changed': True, 'failed': True, 'output': 'Stack Not Found', 'exception': traceback.format_exc()}
ret = get_stack_events(cfn, stack_name, events_limit, op_token)
if not stack:
if 'yes' in existed or operation == 'DELETE': # stacks may delete fast, look in a few ways.
ret = get_stack_events(cfn, stack_name, events_limit, op_token)
ret.update({'changed': True, 'output': 'Stack Deleted'})
return ret
else:
ret.update({'changed': False, 'failed': True, 'output': 'Stack not found.'})
return ret
# it covers ROLLBACK_COMPLETE and UPDATE_ROLLBACK_COMPLETE
# Possible states: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-describing-stacks.html#w1ab2c15c17c21c13
elif stack['StackStatus'].endswith('ROLLBACK_COMPLETE') and operation != 'CREATE_CHANGESET':
ret.update({'changed': True, 'failed': True, 'output': 'Problem with %s. Rollback complete' % operation})
return ret
elif stack['StackStatus'] == 'DELETE_COMPLETE' and operation == 'CREATE':
ret.update({'changed': True, 'failed': True, 'output': 'Stack create failed. Delete complete.'})
return ret
# note the ordering of ROLLBACK_COMPLETE, DELETE_COMPLETE, and COMPLETE, because otherwise COMPLETE will match all cases.
elif stack['StackStatus'].endswith('_COMPLETE'):
ret.update({'changed': True, 'output': 'Stack %s complete' % operation})
return ret
elif stack['StackStatus'].endswith('_ROLLBACK_FAILED'):
ret.update({'changed': True, 'failed': True, 'output': 'Stack %s rollback failed' % operation})
return ret
# note the ordering of ROLLBACK_FAILED and FAILED, because otherwise FAILED will match both cases.
elif stack['StackStatus'].endswith('_FAILED'):
ret.update({'changed': True, 'failed': True, 'output': 'Stack %s failed' % operation})
return ret
else:
# this can loop forever if the stack never reaches a terminal state
time.sleep(5)
return {'failed': True, 'output': 'Failed for unknown reasons.'}
def build_changeset_name(stack_params):
if 'ChangeSetName' in stack_params:
return stack_params['ChangeSetName']
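# Otherwise derive a deterministic name from the sorted parameters, in the form
# 'Ansible-<StackName>-<sha1 hexdigest>', so repeated runs with identical input
# produce the same changeset name.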
json_params = json.dumps(stack_params, sort_keys=True)
return 'Ansible-{0}-{1}'.format(
stack_params['StackName'],
sha1(to_bytes(json_params, errors='surrogate_or_strict')).hexdigest()
)
def check_mode_changeset(module, stack_params, cfn):
"""Create a change set, describe it and delete it before returning check mode outputs."""
stack_params['ChangeSetName'] = build_changeset_name(stack_params)
# changesets don't accept ClientRequestToken parameters
stack_params.pop('ClientRequestToken', None)
try:
change_set = cfn.create_change_set(**stack_params)
for i in range(60): # total time 5 min
description = cfn.describe_change_set(ChangeSetName=change_set['Id'])
if description['Status'] in ('CREATE_COMPLETE', 'FAILED'):
break
time.sleep(5)
else:
# if the changeset doesn't finish in 5 mins, this `else` will trigger and fail
module.fail_json(msg="Failed to create change set %s" % stack_params['ChangeSetName'])
cfn.delete_change_set(ChangeSetName=change_set['Id'])
reason = description.get('StatusReason')
if description['Status'] == 'FAILED' and "didn't contain changes" in description['StatusReason']:
return {'changed': False, 'msg': reason, 'meta': description['StatusReason']}
return {'changed': True, 'msg': reason, 'meta': description['Changes']}
except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as err:
error_msg = boto_exception(err)
module.fail_json(msg=error_msg, exception=traceback.format_exc())
def get_stack_facts(cfn, stack_name):
try:
stack_response = cfn.describe_stacks(StackName=stack_name)
stack_info = stack_response['Stacks'][0]
except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as err:
error_msg = boto_exception(err)
if 'does not exist' in error_msg:
# missing stack, don't bail.
return None
# other error, bail.
raise err
if stack_response and stack_response.get('Stacks', None):
stacks = stack_response['Stacks']
if len(stacks):
stack_info = stacks[0]
return stack_info
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
stack_name=dict(required=True),
template_parameters=dict(required=False, type='dict', default={}),
state=dict(default='present', choices=['present', 'absent']),
template=dict(default=None, required=False, type='path'),
notification_arns=dict(default=None, required=False),
stack_policy=dict(default=None, required=False),
disable_rollback=dict(default=False, type='bool'),
on_create_failure=dict(default=None, required=False, choices=['DO_NOTHING', 'ROLLBACK', 'DELETE']),
create_timeout=dict(default=None, type='int'),
template_url=dict(default=None, required=False),
template_body=dict(default=None, required=False),
template_format=dict(removed_in_version='2.14'),
create_changeset=dict(default=False, type='bool'),
changeset_name=dict(default=None, required=False),
role_arn=dict(default=None, required=False),
tags=dict(default=None, type='dict'),
termination_protection=dict(default=None, type='bool'),
events_limit=dict(default=200, type='int'),
backoff_retries=dict(type='int', default=10, required=False),
backoff_delay=dict(type='int', default=3, required=False),
backoff_max_delay=dict(type='int', default=30, required=False),
capabilities=dict(type='list', default=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'])
)
)
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive=[['template_url', 'template', 'template_body'],
['disable_rollback', 'on_create_failure']],
supports_check_mode=True
)
if not HAS_BOTO3:
module.fail_json(msg='boto3 and botocore are required for this module')
invalid_capabilities = []
user_capabilities = module.params.get('capabilities')
for user_cap in user_capabilities:
if user_cap not in ['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM', 'CAPABILITY_AUTO_EXPAND']:
invalid_capabilities.append(user_cap)
if invalid_capabilities:
module.fail_json(msg="Specified capabilities are invalid : %r,"
" please check documentation for valid capabilities" % invalid_capabilities)
# collect the parameters that are passed to boto3. Keeps us from having so many scalars floating around.
stack_params = {
'Capabilities': user_capabilities,
'ClientRequestToken': to_native(uuid.uuid4()),
}
state = module.params['state']
stack_params['StackName'] = module.params['stack_name']
if module.params['template'] is not None:
with open(module.params['template'], 'r') as template_fh:
stack_params['TemplateBody'] = template_fh.read()
elif module.params['template_body'] is not None:
stack_params['TemplateBody'] = module.params['template_body']
elif module.params['template_url'] is not None:
stack_params['TemplateURL'] = module.params['template_url']
if module.params.get('notification_arns'):
stack_params['NotificationARNs'] = module.params['notification_arns'].split(',')
else:
stack_params['NotificationARNs'] = []
# the stack policy can't be checked when verifying (check mode) or when creating a changeset.
if module.params['stack_policy'] is not None and not module.check_mode and not module.params['create_changeset']:
with open(module.params['stack_policy'], 'r') as stack_policy_fh:
stack_params['StackPolicyBody'] = stack_policy_fh.read()
template_parameters = module.params['template_parameters']
stack_params['Parameters'] = []
for k, v in template_parameters.items():
if isinstance(v, dict):
# set parameter based on a dict to allow additional CFN Parameter Attributes
param = dict(ParameterKey=k)
if 'value' in v:
param['ParameterValue'] = str(v['value'])
if 'use_previous_value' in v and bool(v['use_previous_value']):
param['UsePreviousValue'] = True
param.pop('ParameterValue', None)
stack_params['Parameters'].append(param)
else:
# allow default k/v configuration to set a template parameter
stack_params['Parameters'].append({'ParameterKey': k, 'ParameterValue': str(v)})
if isinstance(module.params.get('tags'), dict):
stack_params['Tags'] = ansible_dict_to_boto3_tag_list(module.params['tags'])
if module.params.get('role_arn'):
stack_params['RoleARN'] = module.params['role_arn']
result = {}
try:
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
cfn = boto3_conn(module, conn_type='client', resource='cloudformation', region=region, endpoint=ec2_url, **aws_connect_kwargs)
except botocore.exceptions.NoCredentialsError as e:
module.fail_json(msg=boto_exception(e))
# Wrap the cloudformation client methods that this module uses with
# automatic backoff / retry for throttling error codes
backoff_wrapper = AWSRetry.jittered_backoff(
retries=module.params.get('backoff_retries'),
delay=module.params.get('backoff_delay'),
max_delay=module.params.get('backoff_max_delay')
)
cfn.describe_stack_events = backoff_wrapper(cfn.describe_stack_events)
cfn.create_stack = backoff_wrapper(cfn.create_stack)
cfn.list_change_sets = backoff_wrapper(cfn.list_change_sets)
cfn.create_change_set = backoff_wrapper(cfn.create_change_set)
cfn.update_stack = backoff_wrapper(cfn.update_stack)
cfn.describe_stacks = backoff_wrapper(cfn.describe_stacks)
cfn.list_stack_resources = backoff_wrapper(cfn.list_stack_resources)
cfn.delete_stack = backoff_wrapper(cfn.delete_stack)
if boto_supports_termination_protection(cfn):
cfn.update_termination_protection = backoff_wrapper(cfn.update_termination_protection)
stack_info = get_stack_facts(cfn, stack_params['StackName'])
if module.check_mode:
if state == 'absent' and stack_info:
module.exit_json(changed=True, msg='Stack would be deleted', meta=[])
elif state == 'absent' and not stack_info:
module.exit_json(changed=False, msg='Stack doesn\'t exist', meta=[])
elif state == 'present' and not stack_info:
module.exit_json(changed=True, msg='New stack would be created', meta=[])
else:
module.exit_json(**check_mode_changeset(module, stack_params, cfn))
if state == 'present':
if not stack_info:
result = create_stack(module, stack_params, cfn, module.params.get('events_limit'))
elif module.params.get('create_changeset'):
result = create_changeset(module, stack_params, cfn, module.params.get('events_limit'))
else:
if module.params.get('termination_protection') is not None:
update_termination_protection(module, cfn, stack_params['StackName'],
bool(module.params.get('termination_protection')))
result = update_stack(module, stack_params, cfn, module.params.get('events_limit'))
# format the stack output
stack = get_stack_facts(cfn, stack_params['StackName'])
if stack is not None:
if result.get('stack_outputs') is None:
# always define stack_outputs, but it may be empty
result['stack_outputs'] = {}
for output in stack.get('Outputs', []):
result['stack_outputs'][output['OutputKey']] = output['OutputValue']
stack_resources = []
reslist = cfn.list_stack_resources(StackName=stack_params['StackName'])
for res in reslist.get('StackResourceSummaries', []):
stack_resources.append({
"logical_resource_id": res['LogicalResourceId'],
"physical_resource_id": res.get('PhysicalResourceId', ''),
"resource_type": res['ResourceType'],
"last_updated_time": res['LastUpdatedTimestamp'],
"status": res['ResourceStatus'],
"status_reason": res.get('ResourceStatusReason') # can be blank, apparently
})
result['stack_resources'] = stack_resources
elif state == 'absent':
# absent state is different because of the way delete_stack works.
# The problem is that it doesn't raise an error if the stack isn't found,
# so we must describe the stack first.
try:
stack = get_stack_facts(cfn, stack_params['StackName'])
if not stack:
result = {'changed': False, 'output': 'Stack not found.'}
else:
if stack_params.get('RoleARN') is None:
cfn.delete_stack(StackName=stack_params['StackName'])
else:
cfn.delete_stack(StackName=stack_params['StackName'], RoleARN=stack_params['RoleARN'])
result = stack_operation(cfn, stack_params['StackName'], 'DELETE', module.params.get('events_limit'),
stack_params.get('ClientRequestToken', None))
except Exception as err:
module.fail_json(msg=boto_exception(err), exception=traceback.format_exc())
module.exit_json(**result)
if __name__ == '__main__':
main()

@ -1,354 +0,0 @@
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cloudformation_info
short_description: Obtain information about an AWS CloudFormation stack
description:
- Gets information about an AWS CloudFormation stack.
- This module was called C(cloudformation_facts) before Ansible 2.9, returning C(ansible_facts).
Note that the M(cloudformation_info) module no longer returns C(ansible_facts)!
requirements:
- boto3 >= 1.0.0
- python >= 2.6
version_added: "2.2"
author:
- Justin Menga (@jmenga)
- Kevin Coming (@waffie1)
options:
stack_name:
description:
- The name or id of the CloudFormation stack. Gathers information on all stacks by default.
type: str
all_facts:
description:
- Get all stack information for the stack.
type: bool
default: false
stack_events:
description:
- Get stack events for the stack.
type: bool
default: false
stack_template:
description:
- Get stack template body for the stack.
type: bool
default: false
stack_resources:
description:
- Get stack resources for the stack.
type: bool
default: false
stack_policy:
description:
- Get stack policy for the stack.
type: bool
default: false
stack_change_sets:
description:
- Get stack change sets for the stack.
type: bool
default: false
version_added: '2.10'
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Get summary information about a stack
- cloudformation_info:
stack_name: my-cloudformation-stack
register: output
- debug:
msg: "{{ output['cloudformation']['my-cloudformation-stack'] }}"
# When the module is called as cloudformation_facts, return values are published
# in ansible_facts['cloudformation'][<stack_name>] and can be used as follows.
# Note that this is deprecated and will stop working in Ansible 2.13.
- cloudformation_facts:
stack_name: my-cloudformation-stack
- debug:
msg: "{{ ansible_facts['cloudformation']['my-cloudformation-stack'] }}"
# Get stack outputs, when you have the stack name available as a fact
- set_fact:
stack_name: my-awesome-stack
- cloudformation_info:
stack_name: "{{ stack_name }}"
register: my_stack
- debug:
msg: "{{ my_stack.cloudformation[stack_name].stack_outputs }}"
# Get all stack information about a stack
- cloudformation_info:
stack_name: my-cloudformation-stack
all_facts: true
# Get stack resource and stack policy information about a stack
- cloudformation_info:
stack_name: my-cloudformation-stack
stack_resources: true
stack_policy: true
# Fail if the stack doesn't exist
- name: try to get facts about a stack but fail if it doesn't exist
cloudformation_info:
stack_name: nonexistent-stack
all_facts: yes
failed_when: cloudformation['nonexistent-stack'] is undefined
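# Get stack change set information about a stack
# (illustrative; the stack_change_sets option was added in Ansible 2.10)
- cloudformation_info:
stack_name: my-cloudformation-stack
stack_change_sets: true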
'''
RETURN = '''
stack_description:
description: Summary facts about the stack
returned: if the stack exists
type: dict
stack_outputs:
description: Dictionary of stack outputs keyed by the value of each output 'OutputKey' parameter and corresponding value of each
output 'OutputValue' parameter
returned: if the stack exists
type: dict
sample:
ApplicationDatabaseName: dazvlpr01xj55a.ap-southeast-2.rds.amazonaws.com
stack_parameters:
description: Dictionary of stack parameters keyed by the value of each parameter 'ParameterKey' parameter and corresponding value of
each parameter 'ParameterValue' parameter
returned: if the stack exists
type: dict
sample:
DatabaseEngine: mysql
DatabasePassword: "***"
stack_events:
description: All stack events for the stack
returned: only if all_facts or stack_events is true and the stack exists
type: list
stack_policy:
description: Describes the stack policy for the stack
returned: only if all_facts or stack_policy is true and the stack exists
type: dict
stack_template:
description: Describes the stack template for the stack
returned: only if all_facts or stack_template is true and the stack exists
type: dict
stack_resource_list:
description: Describes stack resources for the stack
returned: only if all_facts or stack_resources is true and the stack exists
type: list
stack_resources:
description: Dictionary of stack resources keyed by the value of each resource 'LogicalResourceId' parameter and corresponding value of each
resource 'PhysicalResourceId' parameter
returned: only if all_facts or stack_resources is true and the stack exists
type: dict
sample:
AutoScalingGroup: "dev-someapp-AutoscalingGroup-1SKEXXBCAN0S7"
AutoScalingSecurityGroup: "sg-abcd1234"
ApplicationDatabase: "dazvlpr01xj55a"
stack_change_sets:
description: A list of stack change sets. Each item in the list represents the details of a specific changeset
returned: only if all_facts or stack_change_sets is true and the stack exists
type: list
'''
import json
import traceback
from functools import partial
from ansible.module_utils._text import to_native
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import (camel_dict_to_snake_dict, AWSRetry, boto3_tag_list_to_ansible_dict)
try:
import botocore
except ImportError:
pass # handled by AnsibleAWSModule
class CloudFormationServiceManager:
"""Handles CloudFormation Services"""
def __init__(self, module):
self.module = module
self.client = module.client('cloudformation')
@AWSRetry.exponential_backoff(retries=5, delay=5)
def describe_stacks_with_backoff(self, **kwargs):
paginator = self.client.get_paginator('describe_stacks')
return paginator.paginate(**kwargs).build_full_result()['Stacks']
def describe_stacks(self, stack_name=None):
try:
kwargs = {'StackName': stack_name} if stack_name else {}
response = self.describe_stacks_with_backoff(**kwargs)
if response is not None:
return response
self.module.fail_json(msg="Error describing stack(s) - an empty response was returned")
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
if 'does not exist' in e.response['Error']['Message']:
# missing stack, don't bail.
return {}
self.module.fail_json_aws(e, msg="Error describing stack " + stack_name)
@AWSRetry.exponential_backoff(retries=5, delay=5)
def list_stack_resources_with_backoff(self, stack_name):
paginator = self.client.get_paginator('list_stack_resources')
return paginator.paginate(StackName=stack_name).build_full_result()['StackResourceSummaries']
def list_stack_resources(self, stack_name):
try:
return self.list_stack_resources_with_backoff(stack_name)
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
self.module.fail_json_aws(e, msg="Error listing stack resources for stack " + stack_name)
@AWSRetry.exponential_backoff(retries=5, delay=5)
def describe_stack_events_with_backoff(self, stack_name):
paginator = self.client.get_paginator('describe_stack_events')
return paginator.paginate(StackName=stack_name).build_full_result()['StackEvents']
def describe_stack_events(self, stack_name):
try:
return self.describe_stack_events_with_backoff(stack_name)
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
self.module.fail_json_aws(e, msg="Error listing stack events for stack " + stack_name)
@AWSRetry.exponential_backoff(retries=5, delay=5)
def list_stack_change_sets_with_backoff(self, stack_name):
paginator = self.client.get_paginator('list_change_sets')
return paginator.paginate(StackName=stack_name).build_full_result()['Summaries']
@AWSRetry.exponential_backoff(retries=5, delay=5)
def describe_stack_change_set_with_backoff(self, **kwargs):
paginator = self.client.get_paginator('describe_change_set')
return paginator.paginate(**kwargs).build_full_result()
def describe_stack_change_sets(self, stack_name):
changes = []
try:
change_sets = self.list_stack_change_sets_with_backoff(stack_name)
for item in change_sets:
changes.append(self.describe_stack_change_set_with_backoff(
StackName=stack_name,
ChangeSetName=item['ChangeSetName']))
return changes
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
self.module.fail_json_aws(e, msg="Error describing stack change sets for stack " + stack_name)
@AWSRetry.exponential_backoff(retries=5, delay=5)
def get_stack_policy_with_backoff(self, stack_name):
return self.client.get_stack_policy(StackName=stack_name)
def get_stack_policy(self, stack_name):
try:
response = self.get_stack_policy_with_backoff(stack_name)
stack_policy = response.get('StackPolicyBody')
if stack_policy:
return json.loads(stack_policy)
return dict()
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
self.module.fail_json_aws(e, msg="Error getting stack policy for stack " + stack_name)
@AWSRetry.exponential_backoff(retries=5, delay=5)
def get_template_with_backoff(self, stack_name):
return self.client.get_template(StackName=stack_name)
def get_template(self, stack_name):
try:
response = self.get_template_with_backoff(stack_name)
return response.get('TemplateBody')
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
self.module.fail_json_aws(e, msg="Error getting stack template for stack " + stack_name)
def to_dict(items, key, value):
''' Transforms a list of items to a Key/Value dictionary '''
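# Illustrative: to_dict([{'OutputKey': 'Port', 'OutputValue': '80'}],
#               'OutputKey', 'OutputValue') returns {'Port': '80'}.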
if items:
return dict(zip([i.get(key) for i in items], [i.get(value) for i in items]))
else:
return dict()
def main():
argument_spec = dict(
stack_name=dict(),
all_facts=dict(required=False, default=False, type='bool'),
stack_policy=dict(required=False, default=False, type='bool'),
stack_events=dict(required=False, default=False, type='bool'),
stack_resources=dict(required=False, default=False, type='bool'),
stack_template=dict(required=False, default=False, type='bool'),
stack_change_sets=dict(required=False, default=False, type='bool'),
)
module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
is_old_facts = module._name == 'cloudformation_facts'
if is_old_facts:
module.deprecate("The 'cloudformation_facts' module has been renamed to 'cloudformation_info', "
"and the renamed one no longer returns ansible_facts", version='2.13')
service_mgr = CloudFormationServiceManager(module)
if is_old_facts:
result = {'ansible_facts': {'cloudformation': {}}}
else:
result = {'cloudformation': {}}
for stack_description in service_mgr.describe_stacks(module.params.get('stack_name')):
facts = {'stack_description': stack_description}
stack_name = stack_description.get('StackName')
# Create stack output and stack parameter dictionaries
if facts['stack_description']:
facts['stack_outputs'] = to_dict(facts['stack_description'].get('Outputs'), 'OutputKey', 'OutputValue')
facts['stack_parameters'] = to_dict(facts['stack_description'].get('Parameters'),
'ParameterKey', 'ParameterValue')
facts['stack_tags'] = boto3_tag_list_to_ansible_dict(facts['stack_description'].get('Tags'))
# Create optional stack outputs
all_facts = module.params.get('all_facts')
if all_facts or module.params.get('stack_resources'):
facts['stack_resource_list'] = service_mgr.list_stack_resources(stack_name)
facts['stack_resources'] = to_dict(facts.get('stack_resource_list'),
'LogicalResourceId', 'PhysicalResourceId')
if all_facts or module.params.get('stack_template'):
facts['stack_template'] = service_mgr.get_template(stack_name)
if all_facts or module.params.get('stack_policy'):
facts['stack_policy'] = service_mgr.get_stack_policy(stack_name)
if all_facts or module.params.get('stack_events'):
facts['stack_events'] = service_mgr.describe_stack_events(stack_name)
if all_facts or module.params.get('stack_change_sets'):
facts['stack_change_sets'] = service_mgr.describe_stack_change_sets(stack_name)
if is_old_facts:
result['ansible_facts']['cloudformation'][stack_name] = facts
else:
result['cloudformation'][stack_name] = camel_dict_to_snake_dict(facts, ignore_list=('stack_outputs',
'stack_parameters',
'stack_policy',
'stack_resources',
'stack_tags',
'stack_template'))
module.exit_json(changed=False, **result)
if __name__ == '__main__':
main()

File diff suppressed because it is too large

@ -1,738 +0,0 @@
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ec2_ami
version_added: "1.3"
short_description: Create or destroy an image (AMI) in ec2
description:
- Registers or deregisters ec2 images.
options:
instance_id:
description:
- Instance ID to create the AMI from.
type: str
name:
description:
- The name of the new AMI.
type: str
architecture:
version_added: "2.3"
description:
- The target architecture of the image to register.
default: "x86_64"
type: str
kernel_id:
version_added: "2.3"
description:
- The target kernel id of the image to register.
type: str
virtualization_type:
version_added: "2.3"
description:
- The virtualization type of the image to register.
default: "hvm"
type: str
root_device_name:
version_added: "2.3"
description:
- The root device name of the image to register.
type: str
wait:
description:
- Wait for the AMI to be in state 'available' before returning.
default: false
type: bool
wait_timeout:
description:
- How long before wait gives up, in seconds.
default: 900
type: int
state:
description:
- Register or deregister an AMI.
default: 'present'
choices: [ "absent", "present" ]
type: str
description:
description:
- Human-readable string describing the contents and purpose of the AMI.
type: str
no_reboot:
description:
- Flag indicating that the bundling process should not attempt to shut down the instance before bundling. If this flag is True, the
responsibility of maintaining file system integrity is left to the owner of the instance.
default: false
type: bool
image_id:
description:
- Image ID to be deregistered.
type: str
device_mapping:
version_added: "2.0"
description:
- List of device hashes/dictionaries with custom configurations (same block-device-mapping parameters).
type: list
elements: dict
suboptions:
device_name:
type: str
description: The device name. For example C(/dev/sda).
volume_type:
type: str
description: The volume type. Defaults to C(gp2) when not set.
delete_on_termination:
type: bool
description: Whether the device should be automatically deleted when the Instance is terminated.
no_device:
type: bool
description: Suppresses the specified device included in the block device mapping of the AMI.
snapshot_id:
type: str
description: The ID of the Snapshot.
iops:
type: int
description: When using an C(io1) I(volume_type) this sets the number of IOPS provisioned for the volume.
encrypted:
type: bool
description: Whether the volume should be encrypted.
volume_size:
aliases: ['size']
type: int
description: The size of the volume (in GiB).
delete_snapshot:
description:
- Delete snapshots when deregistering the AMI.
default: false
type: bool
tags:
description:
- A dictionary of tags to add to the new image, for example '{"key":"value"}' or '{"key1":"value1","key2":"value2"}'.
version_added: "2.0"
type: dict
purge_tags:
description: Whether to remove existing tags that aren't passed in the C(tags) parameter.
version_added: "2.5"
default: false
type: bool
launch_permissions:
description:
- Users and groups that should be able to launch the AMI. Expects a dictionary with a key of user_ids and/or group_names. user_ids should
be a list of account IDs. group_names should be a list of groups; "all" is the only acceptable value currently.
- You must pass all desired launch permissions if you wish to modify existing launch permissions (passing just groups will remove all users)
version_added: "2.0"
type: dict
image_location:
description:
- The S3 location of an image to use for the AMI.
version_added: "2.5"
type: str
enhanced_networking:
description:
- A boolean representing whether enhanced networking with ENA is enabled or not.
version_added: "2.5"
type: bool
billing_products:
description:
- A list of valid billing codes. To be used with valid accounts by aws marketplace vendors.
version_added: "2.5"
type: list
elements: str
ramdisk_id:
description:
- The ID of the RAM disk.
version_added: "2.5"
type: str
sriov_net_support:
description:
- Set to C(simple) to enable enhanced networking with the Intel 82599 Virtual Function interface for the AMI and any instances that you launch from the AMI.
version_added: "2.5"
type: str
author:
- "Evan Duffield (@scicoin-project) <eduffield@iacquire.com>"
- "Constantin Bugneac (@Constantin07) <constantin.bugneac@endava.com>"
- "Ross Williams (@gunzy83) <gunzy83au@gmail.com>"
- "Willem van Ketwich (@wilvk) <willvk@gmail.com>"
extends_documentation_fragment:
- aws
- ec2
'''
# Thank you to iAcquire for sponsoring development of this module.
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Basic AMI Creation
- ec2_ami:
instance_id: i-xxxxxx
wait: yes
name: newtest
tags:
Name: newtest
Service: TestService
# Basic AMI Creation, without waiting
- ec2_ami:
instance_id: i-xxxxxx
wait: no
name: newtest
# AMI Registration from EBS Snapshot
- ec2_ami:
name: newtest
state: present
architecture: x86_64
virtualization_type: hvm
root_device_name: /dev/xvda
device_mapping:
- device_name: /dev/xvda
volume_size: 8
snapshot_id: snap-xxxxxxxx
delete_on_termination: true
volume_type: gp2
# AMI Creation, with a custom root-device size and another EBS attached
- ec2_ami:
instance_id: i-xxxxxx
name: newtest
device_mapping:
- device_name: /dev/sda1
size: XXX
delete_on_termination: true
volume_type: gp2
- device_name: /dev/sdb
size: YYY
delete_on_termination: false
volume_type: gp2
# AMI Creation, excluding a volume attached at /dev/sdb
- ec2_ami:
instance_id: i-xxxxxx
name: newtest
device_mapping:
- device_name: /dev/sda1
size: XXX
delete_on_termination: true
volume_type: gp2
- device_name: /dev/sdb
no_device: yes
# Deregister/Delete AMI (keep associated snapshots)
- ec2_ami:
image_id: "{{ instance.image_id }}"
delete_snapshot: False
state: absent
# Deregister AMI (delete associated snapshots too)
- ec2_ami:
image_id: "{{ instance.image_id }}"
delete_snapshot: True
state: absent
# Update AMI Launch Permissions, making it public
- ec2_ami:
image_id: "{{ instance.image_id }}"
state: present
launch_permissions:
group_names: ['all']
# Allow AMI to be launched by another account
- ec2_ami:
image_id: "{{ instance.image_id }}"
state: present
launch_permissions:
user_ids: ['123456789012']
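# Replace the tags on an existing AMI, removing any not listed
# (illustrative sketch using the documented purge_tags option)
- ec2_ami:
image_id: "{{ instance.image_id }}"
state: present
purge_tags: true
tags:
Name: newtest
Service: TestService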
'''
RETURN = '''
architecture:
description: Architecture of image.
returned: when AMI is created or already exists
type: str
sample: "x86_64"
block_device_mapping:
description: Block device mapping associated with image.
returned: when AMI is created or already exists
type: dict
sample: {
"/dev/sda1": {
"delete_on_termination": true,
"encrypted": false,
"size": 10,
"snapshot_id": "snap-1a03b80e7",
"volume_type": "standard"
}
}
creationDate:
description: Creation date of image.
returned: when AMI is created or already exists
type: str
sample: "2015-10-15T22:43:44.000Z"
description:
description: Description of image.
returned: when AMI is created or already exists
type: str
sample: "nat-server"
hypervisor:
description: Type of hypervisor.
returned: when AMI is created or already exists
type: str
sample: "xen"
image_id:
description: ID of the image.
returned: when AMI is created or already exists
type: str
sample: "ami-1234abcd"
is_public:
description: Whether image is public.
returned: when AMI is created or already exists
type: bool
sample: false
launch_permission:
description: Permissions allowing other accounts to access the AMI.
returned: when AMI is created or already exists
type: list
sample:
- group: "all"
location:
description: Location of image.
returned: when AMI is created or already exists
type: str
sample: "315210894379/nat-server"
name:
description: AMI name of image.
returned: when AMI is created or already exists
type: str
sample: "nat-server"
ownerId:
description: Owner of image.
returned: when AMI is created or already exists
type: str
sample: "435210894375"
platform:
description: Platform of image.
returned: when AMI is created or already exists
type: str
sample: null
root_device_name:
description: Root device name of image.
returned: when AMI is created or already exists
type: str
sample: "/dev/sda1"
root_device_type:
description: Root device type of image.
returned: when AMI is created or already exists
type: str
sample: "ebs"
state:
description: State of image.
returned: when AMI is created or already exists
type: str
sample: "available"
tags:
description: A dictionary of tags assigned to image.
returned: when AMI is created or already exists
type: dict
sample: {
"Env": "devel",
"Name": "nat-server"
}
virtualization_type:
description: Image virtualization type.
returned: when AMI is created or already exists
type: str
sample: "hvm"
snapshots_deleted:
description: A list of snapshot ids deleted after deregistering image.
returned: after AMI is deregistered, if I(delete_snapshot=true)
type: list
sample: [
"snap-fbcccb8f",
"snap-cfe7cdb4"
]
'''
import time
from ansible.module_utils.ec2 import ansible_dict_to_boto3_tag_list, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict, compare_aws_tags
from ansible.module_utils.aws.core import AnsibleAWSModule
try:
import botocore
except ImportError:
pass # caught by AnsibleAWSModule
def get_block_device_mapping(image):
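# Build a dict keyed by device name; EBS-backed devices get size, snapshot,
# type and encryption details, ephemeral devices just their virtual name.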
bdm_dict = dict()
if image is not None and image.get('block_device_mappings') is not None:
bdm = image.get('block_device_mappings')
for device in bdm:
device_name = device.get('device_name')
if 'ebs' in device:
ebs = device.get("ebs")
bdm_dict_item = {
'size': ebs.get("volume_size"),
'snapshot_id': ebs.get("snapshot_id"),
'volume_type': ebs.get("volume_type"),
'encrypted': ebs.get("encrypted"),
'delete_on_termination': ebs.get("delete_on_termination")
}
elif 'virtual_name' in device:
bdm_dict_item = dict(virtual_name=device['virtual_name'])
bdm_dict[device_name] = bdm_dict_item
return bdm_dict
def get_ami_info(camel_image):
image = camel_dict_to_snake_dict(camel_image)
return dict(
image_id=image.get("image_id"),
state=image.get("state"),
architecture=image.get("architecture"),
block_device_mapping=get_block_device_mapping(image),
creationDate=image.get("creation_date"),
description=image.get("description"),
hypervisor=image.get("hypervisor"),
is_public=image.get("public"),
location=image.get("image_location"),
ownerId=image.get("owner_id"),
root_device_name=image.get("root_device_name"),
root_device_type=image.get("root_device_type"),
virtualization_type=image.get("virtualization_type"),
name=image.get("name"),
tags=boto3_tag_list_to_ansible_dict(image.get('tags')),
platform=image.get("platform"),
enhanced_networking=image.get("ena_support"),
image_owner_alias=image.get("image_owner_alias"),
image_type=image.get("image_type"),
kernel_id=image.get("kernel_id"),
product_codes=image.get("product_codes"),
ramdisk_id=image.get("ramdisk_id"),
sriov_net_support=image.get("sriov_net_support"),
state_reason=image.get("state_reason"),
launch_permissions=image.get('launch_permissions')
)
def create_image(module, connection):
instance_id = module.params.get('instance_id')
name = module.params.get('name')
wait = module.params.get('wait')
wait_timeout = module.params.get('wait_timeout')
description = module.params.get('description')
architecture = module.params.get('architecture')
kernel_id = module.params.get('kernel_id')
root_device_name = module.params.get('root_device_name')
virtualization_type = module.params.get('virtualization_type')
no_reboot = module.params.get('no_reboot')
device_mapping = module.params.get('device_mapping')
tags = module.params.get('tags')
launch_permissions = module.params.get('launch_permissions')
image_location = module.params.get('image_location')
enhanced_networking = module.params.get('enhanced_networking')
billing_products = module.params.get('billing_products')
ramdisk_id = module.params.get('ramdisk_id')
sriov_net_support = module.params.get('sriov_net_support')
try:
params = {
'Name': name,
'Description': description
}
block_device_mapping = None
if device_mapping:
block_device_mapping = []
for device in device_mapping:
device['Ebs'] = {}
if 'device_name' not in device:
module.fail_json(msg="Error - Device name must be set for volume.")
device = rename_item_if_exists(device, 'device_name', 'DeviceName')
device = rename_item_if_exists(device, 'virtual_name', 'VirtualName')
device = rename_item_if_exists(device, 'no_device', 'NoDevice')
device = rename_item_if_exists(device, 'volume_type', 'VolumeType', 'Ebs')
device = rename_item_if_exists(device, 'snapshot_id', 'SnapshotId', 'Ebs')
device = rename_item_if_exists(device, 'delete_on_termination', 'DeleteOnTermination', 'Ebs')
device = rename_item_if_exists(device, 'size', 'VolumeSize', 'Ebs', attribute_type=int)
device = rename_item_if_exists(device, 'volume_size', 'VolumeSize', 'Ebs', attribute_type=int)
device = rename_item_if_exists(device, 'iops', 'Iops', 'Ebs')
device = rename_item_if_exists(device, 'encrypted', 'Encrypted', 'Ebs')
block_device_mapping.append(device)
if block_device_mapping:
params['BlockDeviceMappings'] = block_device_mapping
if instance_id:
params['InstanceId'] = instance_id
params['NoReboot'] = no_reboot
image_id = connection.create_image(**params).get('ImageId')
else:
if architecture:
params['Architecture'] = architecture
if virtualization_type:
params['VirtualizationType'] = virtualization_type
if image_location:
params['ImageLocation'] = image_location
if enhanced_networking:
params['EnaSupport'] = enhanced_networking
if billing_products:
params['BillingProducts'] = billing_products
if ramdisk_id:
params['RamdiskId'] = ramdisk_id
if sriov_net_support:
params['SriovNetSupport'] = sriov_net_support
if kernel_id:
params['KernelId'] = kernel_id
if root_device_name:
params['RootDeviceName'] = root_device_name
image_id = connection.register_image(**params).get('ImageId')
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
module.fail_json_aws(e, msg="Error registering image")
if wait:
waiter = connection.get_waiter('image_available')
delay = wait_timeout // 30
max_attempts = 30
waiter.wait(ImageIds=[image_id], WaiterConfig=dict(Delay=delay, MaxAttempts=max_attempts))
if tags:
try:
connection.create_tags(Resources=[image_id], Tags=ansible_dict_to_boto3_tag_list(tags))
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
module.fail_json_aws(e, msg="Error tagging image")
if launch_permissions:
try:
params = dict(Attribute='LaunchPermission', ImageId=image_id, LaunchPermission=dict(Add=list()))
for group_name in launch_permissions.get('group_names', []):
params['LaunchPermission']['Add'].append(dict(Group=group_name))
for user_id in launch_permissions.get('user_ids', []):
params['LaunchPermission']['Add'].append(dict(UserId=str(user_id)))
if params['LaunchPermission']['Add']:
connection.modify_image_attribute(**params)
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
module.fail_json_aws(e, msg="Error setting launch permissions for image %s" % image_id)
module.exit_json(msg="AMI creation operation complete.", changed=True,
**get_ami_info(get_image_by_id(module, connection, image_id)))
def deregister_image(module, connection):
image_id = module.params.get('image_id')
delete_snapshot = module.params.get('delete_snapshot')
wait = module.params.get('wait')
wait_timeout = module.params.get('wait_timeout')
image = get_image_by_id(module, connection, image_id)
if image is None:
module.exit_json(changed=False)
# Get all associated snapshot ids before deregistering image otherwise this information becomes unavailable.
snapshots = []
if 'BlockDeviceMappings' in image:
for mapping in image.get('BlockDeviceMappings'):
snapshot_id = mapping.get('Ebs', {}).get('SnapshotId')
if snapshot_id is not None:
snapshots.append(snapshot_id)
# When trying to re-deregister an already deregistered image it doesn't raise an exception, it just returns an object without image attributes.
if 'ImageId' in image:
try:
connection.deregister_image(ImageId=image_id)
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
module.fail_json_aws(e, msg="Error deregistering image")
else:
module.exit_json(msg="Image %s has already been deregistered." % image_id, changed=False)
image = get_image_by_id(module, connection, image_id)
wait_timeout = time.time() + wait_timeout
while wait and wait_timeout > time.time() and image is not None:
image = get_image_by_id(module, connection, image_id)
time.sleep(3)
if wait and wait_timeout <= time.time():
module.fail_json(msg="Timed out waiting for image to be deregistered.")
exit_params = {'msg': "AMI deregister operation complete.", 'changed': True}
if delete_snapshot:
try:
for snapshot_id in snapshots:
connection.delete_snapshot(SnapshotId=snapshot_id)
except botocore.exceptions.ClientError as e:
# Don't error out if root volume snapshot was already deregistered as part of deregister_image
if e.response['Error']['Code'] == 'InvalidSnapshot.NotFound':
pass
exit_params['snapshots_deleted'] = snapshots
module.exit_json(**exit_params)
def update_image(module, connection, image_id):
launch_permissions = module.params.get('launch_permissions')
image = get_image_by_id(module, connection, image_id)
if image is None:
module.fail_json(msg="Image %s does not exist" % image_id, changed=False)
changed = False
if launch_permissions is not None:
current_permissions = image['LaunchPermissions']
current_users = set(permission['UserId'] for permission in current_permissions if 'UserId' in permission)
desired_users = set(str(user_id) for user_id in launch_permissions.get('user_ids', []))
current_groups = set(permission['Group'] for permission in current_permissions if 'Group' in permission)
desired_groups = set(launch_permissions.get('group_names', []))
to_add_users = desired_users - current_users
to_remove_users = current_users - desired_users
to_add_groups = desired_groups - current_groups
to_remove_groups = current_groups - desired_groups
to_add = [dict(Group=group) for group in to_add_groups] + [dict(UserId=user_id) for user_id in to_add_users]
to_remove = [dict(Group=group) for group in to_remove_groups] + [dict(UserId=user_id) for user_id in to_remove_users]
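        # Illustrative example (hypothetical values): desired_groups={'all'} with current_groups=set()
        # yields to_add=[{'Group': 'all'}], which makes the AMI public; the reverse yields a Remove entry.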
if to_add or to_remove:
try:
connection.modify_image_attribute(ImageId=image_id, Attribute='launchPermission',
LaunchPermission=dict(Add=to_add, Remove=to_remove))
changed = True
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
module.fail_json_aws(e, msg="Error updating launch permissions of image %s" % image_id)
desired_tags = module.params.get('tags')
if desired_tags is not None:
current_tags = boto3_tag_list_to_ansible_dict(image.get('Tags'))
tags_to_add, tags_to_remove = compare_aws_tags(current_tags, desired_tags, purge_tags=module.params.get('purge_tags'))
if tags_to_remove:
try:
connection.delete_tags(Resources=[image_id], Tags=[dict(Key=tagkey) for tagkey in tags_to_remove])
changed = True
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
module.fail_json_aws(e, msg="Error updating tags")
if tags_to_add:
try:
connection.create_tags(Resources=[image_id], Tags=ansible_dict_to_boto3_tag_list(tags_to_add))
changed = True
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
module.fail_json_aws(e, msg="Error updating tags")
description = module.params.get('description')
if description and description != image['Description']:
try:
            connection.modify_image_attribute(Attribute='Description', ImageId=image_id, Description=dict(Value=description))
changed = True
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
module.fail_json_aws(e, msg="Error setting description for image %s" % image_id)
if changed:
module.exit_json(msg="AMI updated.", changed=True,
**get_ami_info(get_image_by_id(module, connection, image_id)))
else:
module.exit_json(msg="AMI not updated.", changed=False,
**get_ami_info(get_image_by_id(module, connection, image_id)))
def get_image_by_id(module, connection, image_id):
try:
try:
images_response = connection.describe_images(ImageIds=[image_id])
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
module.fail_json_aws(e, msg="Error retrieving image %s" % image_id)
images = images_response.get('Images')
no_images = len(images)
if no_images == 0:
return None
if no_images == 1:
result = images[0]
try:
result['LaunchPermissions'] = connection.describe_image_attribute(Attribute='launchPermission', ImageId=image_id)['LaunchPermissions']
result['ProductCodes'] = connection.describe_image_attribute(Attribute='productCodes', ImageId=image_id)['ProductCodes']
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] != 'InvalidAMIID.Unavailable':
module.fail_json_aws(e, msg="Error retrieving image attributes for image %s" % image_id)
except botocore.exceptions.BotoCoreError as e:
module.fail_json_aws(e, msg="Error retrieving image attributes for image %s" % image_id)
return result
module.fail_json(msg="Invalid number of instances (%s) found for image_id: %s." % (str(len(images)), image_id))
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
module.fail_json_aws(e, msg="Error retrieving image by image_id")
def rename_item_if_exists(dict_object, attribute, new_attribute, child_node=None, attribute_type=None):
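    # Move dict_object[attribute] to dict_object[new_attribute], optionally nesting it under
    # child_node and casting it via attribute_type, and drop the original key if present.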
new_item = dict_object.get(attribute)
if new_item is not None:
if attribute_type is not None:
new_item = attribute_type(new_item)
if child_node is None:
dict_object[new_attribute] = new_item
else:
dict_object[child_node][new_attribute] = new_item
dict_object.pop(attribute)
return dict_object
def main():
argument_spec = dict(
instance_id=dict(),
image_id=dict(),
architecture=dict(default='x86_64'),
kernel_id=dict(),
virtualization_type=dict(default='hvm'),
root_device_name=dict(),
delete_snapshot=dict(default=False, type='bool'),
name=dict(),
wait=dict(type='bool', default=False),
wait_timeout=dict(default=900, type='int'),
description=dict(default=''),
no_reboot=dict(default=False, type='bool'),
state=dict(default='present', choices=['present', 'absent']),
device_mapping=dict(type='list'),
tags=dict(type='dict'),
launch_permissions=dict(type='dict'),
image_location=dict(),
enhanced_networking=dict(type='bool'),
billing_products=dict(type='list'),
ramdisk_id=dict(),
sriov_net_support=dict(),
purge_tags=dict(type='bool', default=False)
)
module = AnsibleAWSModule(
argument_spec=argument_spec,
required_if=[
['state', 'absent', ['image_id']],
]
)
# Using a required_one_of=[['name', 'image_id']] overrides the message that should be provided by
# the required_if for state=absent, so check manually instead
if not any([module.params['image_id'], module.params['name']]):
module.fail_json(msg="one of the following is required: name, image_id")
connection = module.client('ec2')
if module.params.get('state') == 'absent':
deregister_image(module, connection)
elif module.params.get('state') == 'present':
if module.params.get('image_id'):
update_image(module, connection, module.params.get('image_id'))
if not module.params.get('instance_id') and not module.params.get('device_mapping'):
module.fail_json(msg="The parameters instance_id or device_mapping (register from EBS snapshot) are required for a new image.")
create_image(module, connection)
if __name__ == '__main__':
main()

@ -1,281 +0,0 @@
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ec2_ami_info
version_added: '2.5'
short_description: Gather information about ec2 AMIs
description:
- Gather information about ec2 AMIs
- This module was called C(ec2_ami_facts) before Ansible 2.9. The usage did not change.
author:
- Prasad Katti (@prasadkatti)
requirements: [ boto3 ]
options:
image_ids:
description: One or more image IDs.
aliases: [image_id]
type: list
elements: str
filters:
description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value.
- See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImages.html) for possible filters.
- Filter names and values are case sensitive.
type: dict
owners:
description:
- Filter the images by the owner. Valid options are an AWS account ID, self,
        or an AWS owner alias (amazon | aws-marketplace | microsoft).
aliases: [owner]
type: list
elements: str
executable_users:
description:
- Filter images by users with explicit launch permissions. Valid options are an AWS account ID, self, or all (public AMIs).
aliases: [executable_user]
type: list
elements: str
describe_image_attributes:
description:
- Describe attributes (like launchPermission) of the images found.
default: no
type: bool
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: gather information about an AMI using ami-id
ec2_ami_info:
image_ids: ami-5b488823
- name: gather information about all AMIs with tag key Name and value webapp
ec2_ami_info:
filters:
"tag:Name": webapp
- name: gather information about an AMI with 'AMI Name' equal to foobar
ec2_ami_info:
filters:
name: foobar
- name: gather information about Ubuntu 17.04 AMIs published by Canonical (099720109477)
ec2_ami_info:
owners: 099720109477
filters:
name: "ubuntu/images/ubuntu-zesty-17.04-*"
'''
RETURN = '''
images:
description: A list of images.
returned: always
type: list
elements: dict
contains:
architecture:
description: The architecture of the image.
returned: always
type: str
sample: x86_64
block_device_mappings:
description: Any block device mapping entries.
returned: always
type: list
elements: dict
contains:
device_name:
description: The device name exposed to the instance.
returned: always
type: str
sample: /dev/sda1
ebs:
description: EBS volumes
returned: always
type: complex
creation_date:
description: The date and time the image was created.
returned: always
type: str
sample: '2017-10-16T19:22:13.000Z'
description:
description: The description of the AMI.
returned: always
type: str
sample: ''
ena_support:
description: Whether enhanced networking with ENA is enabled.
returned: always
type: bool
sample: true
hypervisor:
description: The hypervisor type of the image.
returned: always
type: str
sample: xen
image_id:
description: The ID of the AMI.
returned: always
type: str
sample: ami-5b466623
image_location:
description: The location of the AMI.
returned: always
type: str
sample: 408466080000/Webapp
image_type:
description: The type of image.
returned: always
type: str
sample: machine
launch_permissions:
        description: A list of AWS accounts that may launch the AMI.
        returned: When the image is owned by the calling account and I(describe_image_attributes) is yes.
type: list
elements: dict
contains:
group:
description: A value of 'all' means the AMI is public.
type: str
user_id:
description: An AWS account ID with permissions to launch the AMI.
type: str
sample: [{"group": "all"}, {"user_id": "408466080000"}]
name:
description: The name of the AMI that was provided during image creation.
returned: always
type: str
sample: Webapp
owner_id:
description: The AWS account ID of the image owner.
returned: always
type: str
sample: '408466080000'
public:
description: Whether the image has public launch permissions.
returned: always
type: bool
sample: true
root_device_name:
description: The device name of the root device.
returned: always
type: str
sample: /dev/sda1
root_device_type:
description: The type of root device used by the AMI.
returned: always
type: str
sample: ebs
sriov_net_support:
description: Whether enhanced networking is enabled.
returned: always
type: str
sample: simple
state:
description: The current state of the AMI.
returned: always
type: str
sample: available
tags:
description: Any tags assigned to the image.
returned: always
type: dict
virtualization_type:
description: The type of virtualization of the AMI.
returned: always
type: str
sample: hvm
'''
try:
from botocore.exceptions import ClientError, BotoCoreError
except ImportError:
pass # caught by AnsibleAWSModule
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import ansible_dict_to_boto3_filter_list, camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict
def list_ec2_images(ec2_client, module):
image_ids = module.params.get("image_ids")
owners = module.params.get("owners")
executable_users = module.params.get("executable_users")
filters = module.params.get("filters")
owner_param = []
# describe_images is *very* slow if you pass the `Owners`
# param (unless it's self), for some reason.
# Converting the owners to filters and removing from the
# owners param greatly speeds things up.
# Implementation based on aioue's suggestion in #24886
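    # Illustrative example (hypothetical values): owners=['123456789012', 'amazon'] becomes
    # filters={'owner-id': ['123456789012'], 'owner-alias': ['amazon']} and owner_param stays
    # empty, so describe_images never receives the slow Owners parameter.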
for owner in owners:
if owner.isdigit():
if 'owner-id' not in filters:
filters['owner-id'] = list()
filters['owner-id'].append(owner)
elif owner == 'self':
# self not a valid owner-alias filter (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImages.html)
owner_param.append(owner)
else:
if 'owner-alias' not in filters:
filters['owner-alias'] = list()
filters['owner-alias'].append(owner)
filters = ansible_dict_to_boto3_filter_list(filters)
try:
images = ec2_client.describe_images(ImageIds=image_ids, Filters=filters, Owners=owner_param, ExecutableUsers=executable_users)
images = [camel_dict_to_snake_dict(image) for image in images["Images"]]
except (ClientError, BotoCoreError) as err:
module.fail_json_aws(err, msg="error describing images")
for image in images:
try:
image['tags'] = boto3_tag_list_to_ansible_dict(image.get('tags', []))
if module.params.get("describe_image_attributes"):
launch_permissions = ec2_client.describe_image_attribute(Attribute='launchPermission', ImageId=image['image_id'])['LaunchPermissions']
image['launch_permissions'] = [camel_dict_to_snake_dict(perm) for perm in launch_permissions]
except (ClientError, BotoCoreError) as err:
# describing launch permissions of images owned by others is not permitted, but shouldn't cause failures
pass
    images.sort(key=lambda e: e.get('creation_date', ''))  # creation_date may be missing on some images
module.exit_json(images=images)
def main():
argument_spec = dict(
image_ids=dict(default=[], type='list', aliases=['image_id']),
filters=dict(default={}, type='dict'),
owners=dict(default=[], type='list', aliases=['owner']),
executable_users=dict(default=[], type='list', aliases=['executable_user']),
describe_image_attributes=dict(default=False, type='bool')
)
module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
if module._module._name == 'ec2_ami_facts':
module._module.deprecate("The 'ec2_ami_facts' module has been renamed to 'ec2_ami_info'", version='2.13')
ec2_client = module.client('ec2')
list_ec2_images(ec2_client, module)
if __name__ == '__main__':
main()

File diff suppressed because it is too large

@ -1,633 +0,0 @@
#!/usr/bin/python
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ec2_eni
short_description: Create and optionally attach an Elastic Network Interface (ENI) to an instance
description:
- Create and optionally attach an Elastic Network Interface (ENI) to an instance. If an ENI ID or private_ip is
provided, the existing ENI (if any) will be modified. The 'attached' parameter controls the attachment status
of the network interface.
version_added: "2.0"
author: "Rob White (@wimnat)"
options:
eni_id:
description:
- The ID of the ENI (to modify).
- If I(eni_id=None) and I(state=present), a new eni will be created.
type: str
instance_id:
description:
- Instance ID that you wish to attach ENI to.
- Since version 2.2, use the I(attached) parameter to attach or detach an ENI. Prior to 2.2, to detach an ENI from an instance, use C(None).
type: str
private_ip_address:
description:
- Private IP address.
type: str
subnet_id:
description:
- ID of subnet in which to create the ENI.
type: str
description:
description:
- Optional description of the ENI.
type: str
security_groups:
description:
- List of security groups associated with the interface. Only used when I(state=present).
- Since version 2.2, you can specify security groups by ID or by name or a combination of both. Prior to 2.2, you can specify only by ID.
type: list
elements: str
state:
description:
- Create or delete ENI.
default: present
choices: [ 'present', 'absent' ]
type: str
device_index:
description:
- The index of the device for the network interface attachment on the instance.
default: 0
type: int
attached:
description:
      - Specifies whether the network interface should be attached to or detached from the instance. If omitted, the attachment
        status won't change.
version_added: 2.2
type: bool
force_detach:
description:
- Force detachment of the interface. This applies either when explicitly detaching the interface by setting I(instance_id=None)
or when deleting an interface with I(state=absent).
default: false
type: bool
delete_on_termination:
description:
- Delete the interface when the instance it is attached to is terminated. You can only specify this flag when the
interface is being modified, not on creation.
required: false
type: bool
source_dest_check:
description:
- By default, interfaces perform source/destination checks. NAT instances however need this check to be disabled.
You can only specify this flag when the interface is being modified, not on creation.
required: false
type: bool
secondary_private_ip_addresses:
description:
- A list of IP addresses to assign as secondary IP addresses to the network interface.
        This option is mutually exclusive with I(secondary_private_ip_address_count).
required: false
version_added: 2.2
type: list
elements: str
purge_secondary_private_ip_addresses:
description:
- To be used with I(secondary_private_ip_addresses) to determine whether or not to remove any secondary IP addresses other than those specified.
- Set I(secondary_private_ip_addresses=[]) to purge all secondary addresses.
default: false
type: bool
version_added: 2.5
secondary_private_ip_address_count:
description:
      - The number of secondary IP addresses to assign to the network interface. This option is mutually exclusive with I(secondary_private_ip_addresses).
required: false
version_added: 2.2
type: int
allow_reassignment:
description:
- Indicates whether to allow an IP address that is already assigned to another network interface or instance
to be reassigned to the specified network interface.
required: false
default: false
type: bool
version_added: 2.7
extends_documentation_fragment:
- aws
- ec2
notes:
  - This module identifies an ENI based on either the I(eni_id), a combination of I(private_ip_address) and I(subnet_id),
    or a combination of I(instance_id) and I(device_index). Any of these options will let you specify a particular ENI.
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Create an ENI. As no security group is defined, ENI will be created in default security group
- ec2_eni:
private_ip_address: 172.31.0.20
subnet_id: subnet-xxxxxxxx
state: present
# Create an ENI and attach it to an instance
- ec2_eni:
instance_id: i-xxxxxxx
device_index: 1
private_ip_address: 172.31.0.20
subnet_id: subnet-xxxxxxxx
state: present
# Create an ENI with two secondary addresses
- ec2_eni:
subnet_id: subnet-xxxxxxxx
state: present
secondary_private_ip_address_count: 2
# Assign a secondary IP address to an existing ENI
# This will purge any existing IPs
- ec2_eni:
subnet_id: subnet-xxxxxxxx
eni_id: eni-yyyyyyyy
state: present
secondary_private_ip_addresses:
- 172.16.1.1
# Remove any secondary IP addresses from an existing ENI
- ec2_eni:
subnet_id: subnet-xxxxxxxx
eni_id: eni-yyyyyyyy
state: present
secondary_private_ip_address_count: 0
# Destroy an ENI, detaching it from any instance if necessary
- ec2_eni:
eni_id: eni-xxxxxxx
force_detach: true
state: absent
# Update an ENI
- ec2_eni:
eni_id: eni-xxxxxxx
description: "My new description"
state: present
# Update an ENI identifying it by private_ip_address and subnet_id
- ec2_eni:
subnet_id: subnet-xxxxxxx
private_ip_address: 172.16.1.1
description: "My new description"
# Detach an ENI from an instance
- ec2_eni:
eni_id: eni-xxxxxxx
instance_id: None
state: present
### Delete an interface on termination
# First create the interface
- ec2_eni:
instance_id: i-xxxxxxx
device_index: 1
private_ip_address: 172.31.0.20
subnet_id: subnet-xxxxxxxx
state: present
register: eni
# Modify the interface to enable the delete_on_termination flag
- ec2_eni:
eni_id: "{{ eni.interface.id }}"
delete_on_termination: true
'''
RETURN = '''
interface:
description: Network interface attributes
returned: when state != absent
type: complex
contains:
description:
description: interface description
type: str
sample: Firewall network interface
groups:
description: list of security groups
type: list
elements: dict
sample: [ { "sg-f8a8a9da": "default" } ]
id:
description: network interface id
type: str
sample: "eni-1d889198"
mac_address:
description: interface's physical address
type: str
sample: "00:00:5E:00:53:23"
owner_id:
description: aws account id
type: str
sample: 812381371
private_ip_address:
description: primary ip address of this interface
type: str
sample: 10.20.30.40
private_ip_addresses:
description: list of all private ip addresses associated to this interface
type: list
elements: dict
sample: [ { "primary_address": true, "private_ip_address": "10.20.30.40" } ]
source_dest_check:
description: value of source/dest check flag
type: bool
sample: True
status:
description: network interface status
type: str
sample: "pending"
subnet_id:
      description: the VPC subnet the interface is bound to
type: str
sample: subnet-b0a0393c
vpc_id:
      description: the VPC this network interface is bound to
type: str
sample: vpc-9a9a9da
'''
import time
import re
try:
import boto.ec2
import boto.vpc
from boto.exception import BotoServerError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (AnsibleAWSError, connect_to_aws,
ec2_argument_spec, get_aws_connection_info,
get_ec2_security_group_ids_from_names)
def get_eni_info(interface):
# Private addresses
private_addresses = []
for ip in interface.private_ip_addresses:
private_addresses.append({'private_ip_address': ip.private_ip_address, 'primary_address': ip.primary})
interface_info = {'id': interface.id,
'subnet_id': interface.subnet_id,
'vpc_id': interface.vpc_id,
'description': interface.description,
'owner_id': interface.owner_id,
'status': interface.status,
'mac_address': interface.mac_address,
'private_ip_address': interface.private_ip_address,
'source_dest_check': interface.source_dest_check,
'groups': dict((group.id, group.name) for group in interface.groups),
'private_ip_addresses': private_addresses
}
if interface.attachment is not None:
interface_info['attachment'] = {'attachment_id': interface.attachment.id,
'instance_id': interface.attachment.instance_id,
'device_index': interface.attachment.device_index,
'status': interface.attachment.status,
'attach_time': interface.attachment.attach_time,
'delete_on_termination': interface.attachment.delete_on_termination,
}
return interface_info
def wait_for_eni(eni, status):
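    # Poll the ENI every few seconds until it reaches the requested attachment status
    # ("attached" or "detached"); boto's update() refreshes the object in place.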
while True:
time.sleep(3)
eni.update()
# If the status is detached we just need attachment to disappear
if eni.attachment is None:
if status == "detached":
break
else:
if status == "attached" and eni.attachment.status == "attached":
break
def create_eni(connection, vpc_id, module):
instance_id = module.params.get("instance_id")
attached = module.params.get("attached")
if instance_id == 'None':
instance_id = None
device_index = module.params.get("device_index")
subnet_id = module.params.get('subnet_id')
private_ip_address = module.params.get('private_ip_address')
description = module.params.get('description')
security_groups = get_ec2_security_group_ids_from_names(module.params.get('security_groups'), connection, vpc_id=vpc_id, boto3=False)
secondary_private_ip_addresses = module.params.get("secondary_private_ip_addresses")
secondary_private_ip_address_count = module.params.get("secondary_private_ip_address_count")
changed = False
try:
eni = connection.create_network_interface(subnet_id, private_ip_address, description, security_groups)
if attached and instance_id is not None:
try:
eni.attach(instance_id, device_index)
except BotoServerError:
eni.delete()
raise
# Wait to allow creation / attachment to finish
wait_for_eni(eni, "attached")
eni.update()
if secondary_private_ip_address_count is not None:
try:
connection.assign_private_ip_addresses(network_interface_id=eni.id, secondary_private_ip_address_count=secondary_private_ip_address_count)
except BotoServerError:
eni.delete()
raise
if secondary_private_ip_addresses is not None:
try:
connection.assign_private_ip_addresses(network_interface_id=eni.id, private_ip_addresses=secondary_private_ip_addresses)
except BotoServerError:
eni.delete()
raise
changed = True
except BotoServerError as e:
module.fail_json(msg=e.message)
module.exit_json(changed=changed, interface=get_eni_info(eni))
def modify_eni(connection, vpc_id, module, eni):
instance_id = module.params.get("instance_id")
attached = module.params.get("attached")
do_detach = module.params.get('state') == 'detached'
device_index = module.params.get("device_index")
description = module.params.get('description')
security_groups = module.params.get('security_groups')
force_detach = module.params.get("force_detach")
source_dest_check = module.params.get("source_dest_check")
delete_on_termination = module.params.get("delete_on_termination")
secondary_private_ip_addresses = module.params.get("secondary_private_ip_addresses")
purge_secondary_private_ip_addresses = module.params.get("purge_secondary_private_ip_addresses")
secondary_private_ip_address_count = module.params.get("secondary_private_ip_address_count")
allow_reassignment = module.params.get("allow_reassignment")
changed = False
try:
if description is not None:
if eni.description != description:
connection.modify_network_interface_attribute(eni.id, "description", description)
changed = True
if len(security_groups) > 0:
groups = get_ec2_security_group_ids_from_names(security_groups, connection, vpc_id=vpc_id, boto3=False)
if sorted(get_sec_group_list(eni.groups)) != sorted(groups):
connection.modify_network_interface_attribute(eni.id, "groupSet", groups)
changed = True
if source_dest_check is not None:
if eni.source_dest_check != source_dest_check:
connection.modify_network_interface_attribute(eni.id, "sourceDestCheck", source_dest_check)
changed = True
if delete_on_termination is not None and eni.attachment is not None:
if eni.attachment.delete_on_termination is not delete_on_termination:
connection.modify_network_interface_attribute(eni.id, "deleteOnTermination", delete_on_termination, eni.attachment.id)
changed = True
current_secondary_addresses = [i.private_ip_address for i in eni.private_ip_addresses if not i.primary]
if secondary_private_ip_addresses is not None:
            secondary_addresses_to_remove = list(set(current_secondary_addresses) - set(secondary_private_ip_addresses))
            if secondary_addresses_to_remove and purge_secondary_private_ip_addresses:
                connection.unassign_private_ip_addresses(network_interface_id=eni.id,
                                                         private_ip_addresses=secondary_addresses_to_remove,
                                                         dry_run=False)
                changed = True
secondary_addresses_to_add = list(set(secondary_private_ip_addresses) - set(current_secondary_addresses))
if secondary_addresses_to_add:
connection.assign_private_ip_addresses(network_interface_id=eni.id,
private_ip_addresses=secondary_addresses_to_add,
secondary_private_ip_address_count=None,
allow_reassignment=allow_reassignment, dry_run=False)
changed = True
if secondary_private_ip_address_count is not None:
current_secondary_address_count = len(current_secondary_addresses)
if secondary_private_ip_address_count > current_secondary_address_count:
connection.assign_private_ip_addresses(network_interface_id=eni.id,
private_ip_addresses=None,
secondary_private_ip_address_count=(secondary_private_ip_address_count -
current_secondary_address_count),
allow_reassignment=allow_reassignment, dry_run=False)
changed = True
elif secondary_private_ip_address_count < current_secondary_address_count:
# How many of these addresses do we want to remove
secondary_addresses_to_remove_count = current_secondary_address_count - secondary_private_ip_address_count
                connection.unassign_private_ip_addresses(network_interface_id=eni.id,
                                                         private_ip_addresses=current_secondary_addresses[:secondary_addresses_to_remove_count],
                                                         dry_run=False)
                changed = True
if attached is True:
if eni.attachment and eni.attachment.instance_id != instance_id:
detach_eni(eni, module)
eni.attach(instance_id, device_index)
wait_for_eni(eni, "attached")
changed = True
if eni.attachment is None:
eni.attach(instance_id, device_index)
wait_for_eni(eni, "attached")
changed = True
elif attached is False:
detach_eni(eni, module)
except BotoServerError as e:
module.fail_json(msg=e.message)
eni.update()
module.exit_json(changed=changed, interface=get_eni_info(eni))
def delete_eni(connection, module):
eni_id = module.params.get("eni_id")
force_detach = module.params.get("force_detach")
try:
eni_result_set = connection.get_all_network_interfaces(eni_id)
eni = eni_result_set[0]
if force_detach is True:
if eni.attachment is not None:
eni.detach(force_detach)
# Wait to allow detachment to finish
wait_for_eni(eni, "detached")
eni.update()
eni.delete()
changed = True
else:
eni.delete()
changed = True
module.exit_json(changed=changed)
except BotoServerError as e:
regex = re.compile('The networkInterface ID \'.*\' does not exist')
if regex.search(e.message) is not None:
module.exit_json(changed=False)
else:
module.fail_json(msg=e.message)
def detach_eni(eni, module):
attached = module.params.get("attached")
force_detach = module.params.get("force_detach")
if eni.attachment is not None:
eni.detach(force_detach)
wait_for_eni(eni, "detached")
if attached:
return
eni.update()
module.exit_json(changed=True, interface=get_eni_info(eni))
else:
module.exit_json(changed=False, interface=get_eni_info(eni))
def uniquely_find_eni(connection, module):
eni_id = module.params.get("eni_id")
private_ip_address = module.params.get('private_ip_address')
subnet_id = module.params.get('subnet_id')
instance_id = module.params.get('instance_id')
device_index = module.params.get('device_index')
attached = module.params.get('attached')
try:
filters = {}
        # proceed only if we're unambiguously specifying an ENI
if eni_id is None and private_ip_address is None and (instance_id is None and device_index is None):
return None
if private_ip_address and subnet_id:
filters['private-ip-address'] = private_ip_address
filters['subnet-id'] = subnet_id
if not attached and instance_id and device_index:
filters['attachment.instance-id'] = instance_id
filters['attachment.device-index'] = device_index
if eni_id is None and len(filters) == 0:
return None
eni_result = connection.get_all_network_interfaces(eni_id, filters=filters)
if len(eni_result) == 1:
return eni_result[0]
else:
return None
except BotoServerError as e:
module.fail_json(msg=e.message)
return None
def get_sec_group_list(groups):
# Build list of remote security groups
remote_security_groups = []
for group in groups:
remote_security_groups.append(group.id.encode())
return remote_security_groups
def _get_vpc_id(connection, module, subnet_id):
try:
return connection.get_all_subnets(subnet_ids=[subnet_id])[0].vpc_id
except BotoServerError as e:
module.fail_json(msg=e.message)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
eni_id=dict(default=None, type='str'),
instance_id=dict(default=None, type='str'),
private_ip_address=dict(type='str'),
subnet_id=dict(type='str'),
description=dict(type='str'),
security_groups=dict(default=[], type='list'),
device_index=dict(default=0, type='int'),
state=dict(default='present', choices=['present', 'absent']),
            force_detach=dict(default=False, type='bool'),
source_dest_check=dict(default=None, type='bool'),
delete_on_termination=dict(default=None, type='bool'),
secondary_private_ip_addresses=dict(default=None, type='list'),
purge_secondary_private_ip_addresses=dict(default=False, type='bool'),
secondary_private_ip_address_count=dict(default=None, type='int'),
allow_reassignment=dict(default=False, type='bool'),
attached=dict(default=None, type='bool')
)
)
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=[
['secondary_private_ip_addresses', 'secondary_private_ip_address_count']
],
required_if=([
('state', 'absent', ['eni_id']),
('attached', True, ['instance_id']),
('purge_secondary_private_ip_addresses', True, ['secondary_private_ip_addresses'])
])
)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if region:
try:
connection = connect_to_aws(boto.ec2, region, **aws_connect_params)
vpc_connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="region must be specified")
state = module.params.get("state")
if state == 'present':
eni = uniquely_find_eni(connection, module)
if eni is None:
subnet_id = module.params.get("subnet_id")
if subnet_id is None:
module.fail_json(msg="subnet_id is required when creating a new ENI")
vpc_id = _get_vpc_id(vpc_connection, module, subnet_id)
create_eni(connection, vpc_id, module)
else:
vpc_id = eni.vpc_id
modify_eni(connection, vpc_id, module, eni)
elif state == 'absent':
delete_eni(connection, module)
if __name__ == '__main__':
main()

@ -1,275 +0,0 @@
#!/usr/bin/python
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ec2_eni_info
short_description: Gather information about ec2 ENI interfaces in AWS
description:
- Gather information about ec2 ENI interfaces in AWS.
- This module was called C(ec2_eni_facts) before Ansible 2.9. The usage did not change.
version_added: "2.0"
author: "Rob White (@wimnat)"
requirements: [ boto3 ]
options:
filters:
description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value.
See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeNetworkInterfaces.html) for possible filters.
type: dict
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Gather information about all ENIs
- ec2_eni_info:
# Gather information about a particular ENI
- ec2_eni_info:
filters:
network-interface-id: eni-xxxxxxx
'''
RETURN = '''
network_interfaces:
description: List of matching elastic network interfaces
returned: always
type: complex
contains:
association:
description: Info of associated elastic IP (EIP)
returned: always, empty dict if no association exists
type: dict
sample: {
allocation_id: "eipalloc-5sdf123",
association_id: "eipassoc-8sdf123",
ip_owner_id: "4415120123456",
public_dns_name: "ec2-52-1-0-63.compute-1.amazonaws.com",
public_ip: "52.1.0.63"
}
attachment:
description: Info about attached ec2 instance
returned: always, empty dict if ENI is not attached
type: dict
sample: {
attach_time: "2017-08-05T15:25:47+00:00",
attachment_id: "eni-attach-149d21234",
delete_on_termination: false,
device_index: 1,
instance_id: "i-15b8d3cadbafa1234",
instance_owner_id: "4415120123456",
status: "attached"
}
availability_zone:
description: Availability zone of ENI
returned: always
type: str
sample: "us-east-1b"
description:
description: Description text for ENI
returned: always
type: str
sample: "My favourite network interface"
groups:
description: List of attached security groups
returned: always
type: list
sample: [
{
group_id: "sg-26d0f1234",
group_name: "my_ec2_security_group"
}
]
id:
description: The id of the ENI (alias for network_interface_id)
returned: always
type: str
sample: "eni-392fsdf"
interface_type:
description: Type of the network interface
returned: always
type: str
sample: "interface"
ipv6_addresses:
description: List of IPv6 addresses for this interface
returned: always
type: list
sample: []
mac_address:
description: MAC address of the network interface
returned: always
type: str
sample: "0a:f8:10:2f:ab:a1"
network_interface_id:
description: The id of the ENI
returned: always
type: str
sample: "eni-392fsdf"
owner_id:
description: AWS account id of the owner of the ENI
returned: always
type: str
sample: "4415120123456"
private_dns_name:
description: Private DNS name for the ENI
returned: always
type: str
sample: "ip-172-16-1-180.ec2.internal"
private_ip_address:
description: Private IP address for the ENI
returned: always
type: str
sample: "172.16.1.180"
private_ip_addresses:
description: List of private IP addresses attached to the ENI
returned: always
type: list
sample: []
requester_id:
description: The ID of the entity that launched the ENI
returned: always
type: str
sample: "AIDAIONYVJQNIAZFT3ABC"
requester_managed:
description: Indicates whether the network interface is being managed by an AWS service.
returned: always
type: bool
sample: false
source_dest_check:
description: Indicates whether the network interface performs source/destination checking.
returned: always
type: bool
sample: false
status:
description: Indicates if the network interface is attached to an instance or not
returned: always
type: str
sample: "in-use"
subnet_id:
description: Subnet ID the ENI is in
returned: always
type: str
sample: "subnet-7bbf01234"
tag_set:
description: Dictionary of tags added to the ENI
returned: always
type: dict
sample: {}
vpc_id:
      description: ID of the VPC the network interface is part of
returned: always
type: str
sample: "vpc-b3f1f123"
'''
try:
from botocore.exceptions import ClientError, NoCredentialsError
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import ansible_dict_to_boto3_filter_list, boto3_conn
from ansible.module_utils.ec2 import boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict
from ansible.module_utils.ec2 import ec2_argument_spec, get_aws_connection_info
def list_eni(connection, module):
if module.params.get("filters") is None:
filters = []
else:
filters = ansible_dict_to_boto3_filter_list(module.params.get("filters"))
try:
network_interfaces_result = connection.describe_network_interfaces(Filters=filters)['NetworkInterfaces']
except (ClientError, NoCredentialsError) as e:
module.fail_json(msg=e.message)
# Modify boto3 tags list to be ansible friendly dict and then camel_case
camel_network_interfaces = []
for network_interface in network_interfaces_result:
network_interface['TagSet'] = boto3_tag_list_to_ansible_dict(network_interface['TagSet'])
# Added id to interface info to be compatible with return values of ec2_eni module:
network_interface['Id'] = network_interface['NetworkInterfaceId']
camel_network_interfaces.append(camel_dict_to_snake_dict(network_interface))
module.exit_json(network_interfaces=camel_network_interfaces)
def get_eni_info(interface):
# Private addresses
private_addresses = []
for ip in interface.private_ip_addresses:
private_addresses.append({'private_ip_address': ip.private_ip_address, 'primary_address': ip.primary})
interface_info = {'id': interface.id,
'subnet_id': interface.subnet_id,
'vpc_id': interface.vpc_id,
'description': interface.description,
'owner_id': interface.owner_id,
'status': interface.status,
'mac_address': interface.mac_address,
'private_ip_address': interface.private_ip_address,
'source_dest_check': interface.source_dest_check,
'groups': dict((group.id, group.name) for group in interface.groups),
'private_ip_addresses': private_addresses
}
if hasattr(interface, 'publicDnsName'):
interface_info['association'] = {'public_ip_address': interface.publicIp,
'public_dns_name': interface.publicDnsName,
'ip_owner_id': interface.ipOwnerId
}
if interface.attachment is not None:
interface_info['attachment'] = {'attachment_id': interface.attachment.id,
'instance_id': interface.attachment.instance_id,
'device_index': interface.attachment.device_index,
'status': interface.attachment.status,
'attach_time': interface.attachment.attach_time,
'delete_on_termination': interface.attachment.delete_on_termination,
}
return interface_info
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
filters=dict(default=None, type='dict')
)
)
module = AnsibleModule(argument_spec=argument_spec)
if module._name == 'ec2_eni_facts':
module.deprecate("The 'ec2_eni_facts' module has been renamed to 'ec2_eni_info'", version='2.13')
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_params)
list_eni(connection, module)
if __name__ == '__main__':
main()

File diff suppressed because it is too large

@ -1,143 +0,0 @@
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ec2_group_info
short_description: Gather information about ec2 security groups in AWS.
description:
- Gather information about ec2 security groups in AWS.
- This module was called C(ec2_group_facts) before Ansible 2.9. The usage did not change.
version_added: "2.3"
requirements: [ boto3 ]
author:
- Henrique Rodrigues (@Sodki)
options:
filters:
description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value. See
U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html) for
possible filters. Filter names and values are case sensitive. You can also use underscores (_)
instead of dashes (-) in the filter keys, which will take precedence in case of conflict.
required: false
default: {}
type: dict
notes:
- By default, the module will return all security groups. To limit results use the appropriate filters.
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Gather information about all security groups
- ec2_group_info:
# Gather information about all security groups in a specific VPC
- ec2_group_info:
filters:
vpc-id: vpc-12345678
# Gather information about a security group
- ec2_group_info:
filters:
group-name: example-1
# Gather information about a security group by id
- ec2_group_info:
filters:
group-id: sg-12345678
# Gather information about a security group with multiple filters, also mixing the use of underscores as filter keys
- ec2_group_info:
filters:
group_id: sg-12345678
vpc-id: vpc-12345678
# Gather information about various security groups
- ec2_group_info:
filters:
group-name:
- example-1
- example-2
- example-3
# Gather information about any security group with a tag key Name and value Example.
# The quotes around 'tag:Name' are important because of the colon in the key
- ec2_group_info:
filters:
"tag:Name": Example
'''
RETURN = '''
security_groups:
description: Security groups that match the provided filters. Each element consists of a dict with all the information related to that security group.
type: list
returned: always
sample:
'''
try:
from botocore.exceptions import BotoCoreError, ClientError
except ImportError:
pass # caught by AnsibleAWSModule
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import (boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_filter_list, camel_dict_to_snake_dict)
def main():
argument_spec = dict(
filters=dict(default={}, type='dict')
)
module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
if module._name == 'ec2_group_facts':
module.deprecate("The 'ec2_group_facts' module has been renamed to 'ec2_group_info'", version='2.13')
connection = module.client('ec2')
# Replace filter key underscores with dashes, for compatibility, except if we're dealing with tags
sanitized_filters = module.params.get("filters")
for key in list(sanitized_filters):
if not key.startswith("tag:"):
sanitized_filters[key.replace("_", "-")] = sanitized_filters.pop(key)
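    # Illustrative example (hypothetical values): {'group_id': 'sg-12345678', 'tag:Name': 'Example'}
    # becomes {'group-id': 'sg-12345678', 'tag:Name': 'Example'}; tag keys are passed through untouched.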
try:
security_groups = connection.describe_security_groups(
Filters=ansible_dict_to_boto3_filter_list(sanitized_filters)
)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg='Failed to describe security groups')
snaked_security_groups = []
for security_group in security_groups['SecurityGroups']:
# Modify boto3 tags list to be ansible friendly dict
# but don't camel case tags
security_group = camel_dict_to_snake_dict(security_group)
security_group['tags'] = boto3_tag_list_to_ansible_dict(security_group.get('tags', {}), tag_name_key_name='key', tag_value_key_name='value')
snaked_security_groups.append(security_group)
module.exit_json(security_groups=snaked_security_groups)
if __name__ == '__main__':
main()

@ -1,271 +0,0 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ec2_key
version_added: "1.5"
short_description: create or delete an ec2 key pair
description:
- create or delete an ec2 key pair.
options:
name:
description:
- Name of the key pair.
required: true
type: str
key_material:
description:
- Public key material.
required: false
type: str
force:
description:
- Force overwrite of already existing key pair if key has changed.
required: false
default: true
type: bool
version_added: "2.3"
state:
description:
- create or delete keypair
required: false
choices: [ present, absent ]
default: 'present'
type: str
wait:
description:
- This option has no effect since version 2.5 and will be removed in 2.14.
version_added: "1.6"
type: bool
wait_timeout:
description:
- This option has no effect since version 2.5 and will be removed in 2.14.
version_added: "1.6"
type: int
required: false
extends_documentation_fragment:
- aws
- ec2
requirements: [ boto3 ]
author:
- "Vincent Viallet (@zbal)"
- "Prasad Katti (@prasadkatti)"
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: create a new ec2 key pair, returns generated private key
ec2_key:
name: my_keypair
- name: create key pair using provided key_material
ec2_key:
name: my_keypair
key_material: 'ssh-rsa AAAAxyz...== me@example.com'
- name: create key pair using key_material obtained using 'file' lookup plugin
ec2_key:
name: my_keypair
key_material: "{{ lookup('file', '/path/to/public_key/id_rsa.pub') }}"
# try creating a key pair with the name of an already existing keypair
# but don't overwrite it even if the key is different (force=false)
- name: try creating a key pair with name of an already existing keypair
ec2_key:
name: my_existing_keypair
key_material: 'ssh-rsa AAAAxyz...== me@example.com'
force: false
- name: remove key pair by name
ec2_key:
name: my_keypair
state: absent
'''
RETURN = '''
changed:
description: whether a keypair was created/deleted
returned: always
type: bool
sample: true
msg:
description: short message describing the action taken
returned: always
type: str
sample: key pair created
key:
description: details of the keypair (this is set to null when state is absent)
returned: always
type: complex
contains:
fingerprint:
description: fingerprint of the key
returned: when state is present
type: str
sample: 'b0:22:49:61:d9:44:9d:0c:7e:ac:8a:32:93:21:6c:e8:fb:59:62:43'
name:
description: name of the keypair
returned: when state is present
type: str
sample: my_keypair
private_key:
description: private key of a newly created keypair
returned: when a new keypair is created by AWS (key_material is not provided)
type: str
sample: '-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKC...
-----END RSA PRIVATE KEY-----'
'''
import uuid
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils._text import to_bytes
try:
from botocore.exceptions import ClientError
except ImportError:
pass # caught by AnsibleAWSModule
def extract_key_data(key):
data = {
'name': key['KeyName'],
'fingerprint': key['KeyFingerprint']
}
if 'KeyMaterial' in key:
data['private_key'] = key['KeyMaterial']
return data
def get_key_fingerprint(module, ec2_client, key_material):
'''
EC2's fingerprints are non-trivial to generate, so push this key
to a temporary name and make ec2 calculate the fingerprint for us.
http://blog.jbrowne.com/?p=23
https://forums.aws.amazon.com/thread.jspa?messageID=352828
'''
# find an unused name
name_in_use = True
while name_in_use:
random_name = "ansible-" + str(uuid.uuid4())
name_in_use = find_key_pair(module, ec2_client, random_name)
temp_key = import_key_pair(module, ec2_client, random_name, key_material)
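    # finish_task=False stops delete_key_pair from calling module.exit_json; the temporary
    # key exists only so EC2 will compute the fingerprint for us.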
delete_key_pair(module, ec2_client, random_name, finish_task=False)
return temp_key['KeyFingerprint']
def find_key_pair(module, ec2_client, name):
try:
key = ec2_client.describe_key_pairs(KeyNames=[name])['KeyPairs'][0]
except ClientError as err:
if err.response['Error']['Code'] == "InvalidKeyPair.NotFound":
return None
module.fail_json_aws(err, msg="error finding keypair")
except IndexError:
key = None
return key
def create_key_pair(module, ec2_client, name, key_material, force):
key = find_key_pair(module, ec2_client, name)
if key:
if key_material and force:
if not module.check_mode:
new_fingerprint = get_key_fingerprint(module, ec2_client, key_material)
if key['KeyFingerprint'] != new_fingerprint:
delete_key_pair(module, ec2_client, name, finish_task=False)
key = import_key_pair(module, ec2_client, name, key_material)
key_data = extract_key_data(key)
module.exit_json(changed=True, key=key_data, msg="key pair updated")
else:
# Assume a change will be made in check mode since a comparison can't be done
module.exit_json(changed=True, key=extract_key_data(key), msg="key pair updated")
key_data = extract_key_data(key)
module.exit_json(changed=False, key=key_data, msg="key pair already exists")
else:
# key doesn't exist, create it now
key_data = None
if not module.check_mode:
if key_material:
key = import_key_pair(module, ec2_client, name, key_material)
else:
try:
key = ec2_client.create_key_pair(KeyName=name)
except ClientError as err:
module.fail_json_aws(err, msg="error creating key")
key_data = extract_key_data(key)
module.exit_json(changed=True, key=key_data, msg="key pair created")
def import_key_pair(module, ec2_client, name, key_material):
try:
key = ec2_client.import_key_pair(KeyName=name, PublicKeyMaterial=to_bytes(key_material))
except ClientError as err:
module.fail_json_aws(err, msg="error importing key")
return key
def delete_key_pair(module, ec2_client, name, finish_task=True):
key = find_key_pair(module, ec2_client, name)
if key:
if not module.check_mode:
try:
ec2_client.delete_key_pair(KeyName=name)
except ClientError as err:
module.fail_json_aws(err, msg="error deleting key")
if not finish_task:
return
module.exit_json(changed=True, key=None, msg="key deleted")
module.exit_json(key=None, msg="key did not exist")
def main():
argument_spec = dict(
name=dict(required=True),
key_material=dict(),
force=dict(type='bool', default=True),
state=dict(default='present', choices=['present', 'absent']),
wait=dict(type='bool', removed_in_version='2.14'),
wait_timeout=dict(type='int', removed_in_version='2.14')
)
module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
ec2_client = module.client('ec2')
name = module.params['name']
state = module.params.get('state')
key_material = module.params.get('key_material')
force = module.params.get('force')
if state == 'absent':
delete_key_pair(module, ec2_client, name)
elif state == 'present':
create_key_pair(module, ec2_client, name, key_material, force)
if __name__ == '__main__':
main()

@ -1,564 +0,0 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: ec2_metadata_facts
short_description: Gathers facts (instance metadata) about remote hosts within ec2
version_added: "1.0"
author:
- Silviu Dicu (@silviud)
- Vinay Dandekar (@roadmapper)
description:
- This module fetches data from the instance metadata endpoint in ec2 as per
U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html).
- The module must be called from within the EC2 instance itself.
notes:
- Parameters to filter on ec2_metadata_facts may be added later.
'''
EXAMPLES = '''
# Gather EC2 metadata facts
- ec2_metadata_facts:
- debug:
msg: "This instance is a t1.micro"
when: ansible_ec2_instance_type == "t1.micro"
'''
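# For reference, the facts below come from plain HTTP GETs against the EC2 instance
# metadata service. A minimal sketch of an equivalent lookup (assumes it runs on the
# instance itself and that the 'requests' package is available; not part of this module):
#
#   import requests
#   instance_type = requests.get('http://169.254.169.254/latest/meta-data/instance-type', timeout=2).text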
RETURN = '''
ansible_facts:
description: Dictionary of new facts representing discovered properties of the EC2 instance.
returned: changed
type: complex
contains:
ansible_ec2_ami_id:
description: The AMI ID used to launch the instance.
type: str
sample: "ami-XXXXXXXX"
ansible_ec2_ami_launch_index:
description:
- If you started more than one instance at the same time, this value indicates the order in which the instance was launched.
- The value of the first instance launched is 0.
type: str
sample: "0"
ansible_ec2_ami_manifest_path:
description:
- The path to the AMI manifest file in Amazon S3.
- If you used an Amazon EBS-backed AMI to launch the instance, the returned result is unknown.
type: str
sample: "(unknown)"
ansible_ec2_ancestor_ami_ids:
description:
- The AMI IDs of any instances that were rebundled to create this AMI.
- This value will only exist if the AMI manifest file contained an ancestor-amis key.
type: str
sample: "(unknown)"
ansible_ec2_block_device_mapping_ami:
description: The virtual device that contains the root/boot file system.
type: str
sample: "/dev/sda1"
ansible_ec2_block_device_mapping_ebsN:
description:
- The virtual devices associated with Amazon EBS volumes, if any are present.
- Amazon EBS volumes are only available in metadata if they were present at launch time or when the instance was last started.
- The N indicates the index of the Amazon EBS volume (such as ebs1 or ebs2).
type: str
sample: "/dev/xvdb"
ansible_ec2_block_device_mapping_ephemeralN:
description: The virtual devices associated with ephemeral devices, if any are present. The N indicates the index of the ephemeral volume.
type: str
sample: "/dev/xvdc"
ansible_ec2_block_device_mapping_root:
description:
- The virtual devices or partitions associated with the root devices, or partitions on the virtual device,
        where the root (/ or C:) file system is associated with the given instance.
type: str
sample: "/dev/sda1"
ansible_ec2_block_device_mapping_swap:
description: The virtual devices associated with swap. Not always present.
type: str
sample: "/dev/sda2"
ansible_ec2_fws_instance_monitoring:
description: "Value showing whether the customer has enabled detailed one-minute monitoring in CloudWatch."
type: str
sample: "enabled"
ansible_ec2_hostname:
description:
- The private IPv4 DNS hostname of the instance.
- In cases where multiple network interfaces are present, this refers to the eth0 device (the device for which the device number is 0).
type: str
sample: "ip-10-0-0-1.ec2.internal"
ansible_ec2_iam_info:
description:
- If there is an IAM role associated with the instance, contains information about the last time the instance profile was updated,
including the instance's LastUpdated date, InstanceProfileArn, and InstanceProfileId. Otherwise, not present.
type: complex
sample: ""
contains:
LastUpdated:
            description: The last time the InstanceProfile associated with the Instance changed.
type: str
InstanceProfileArn:
description: The ARN of the InstanceProfile associated with the Instance.
type: str
InstanceProfileId:
description: The Id of the InstanceProfile associated with the Instance.
type: str
ansible_ec2_iam_info_instanceprofilearn:
description: The IAM instance profile ARN.
type: str
sample: "arn:aws:iam::<account id>:instance-profile/<role name>"
ansible_ec2_iam_info_instanceprofileid:
description: IAM instance profile ID.
type: str
sample: ""
ansible_ec2_iam_info_lastupdated:
description: IAM info last updated time.
type: str
sample: "2017-05-12T02:42:27Z"
ansible_ec2_iam_instance_profile_role:
description: IAM instance role.
type: str
sample: "role_name"
ansible_ec2_iam_security_credentials_<role name>:
description:
- If there is an IAM role associated with the instance, role-name is the name of the role,
and role-name contains the temporary security credentials associated with the role. Otherwise, not present.
type: str
sample: ""
ansible_ec2_iam_security_credentials_<role name>_accesskeyid:
description: IAM role access key ID.
type: str
sample: ""
ansible_ec2_iam_security_credentials_<role name>_code:
description: IAM code.
type: str
sample: "Success"
ansible_ec2_iam_security_credentials_<role name>_expiration:
description: IAM role credentials expiration time.
type: str
sample: "2017-05-12T09:11:41Z"
ansible_ec2_iam_security_credentials_<role name>_lastupdated:
description: IAM role last updated time.
type: str
sample: "2017-05-12T02:40:44Z"
ansible_ec2_iam_security_credentials_<role name>_secretaccesskey:
description: IAM role secret access key.
type: str
sample: ""
ansible_ec2_iam_security_credentials_<role name>_token:
description: IAM role token.
type: str
sample: ""
ansible_ec2_iam_security_credentials_<role name>_type:
description: IAM role type.
type: str
sample: "AWS-HMAC"
ansible_ec2_instance_action:
description: Notifies the instance that it should reboot in preparation for bundling.
type: str
sample: "none"
ansible_ec2_instance_id:
description: The ID of this instance.
type: str
sample: "i-XXXXXXXXXXXXXXXXX"
ansible_ec2_instance_identity_document:
description: JSON containing instance attributes, such as instance-id, private IP address, etc.
type: str
sample: ""
ansible_ec2_instance_identity_document_accountid:
description: ""
type: str
sample: "012345678901"
ansible_ec2_instance_identity_document_architecture:
description: Instance system architecture.
type: str
sample: "x86_64"
ansible_ec2_instance_identity_document_availabilityzone:
description: The Availability Zone in which the instance launched.
type: str
sample: "us-east-1a"
ansible_ec2_instance_identity_document_billingproducts:
description: Billing products for this instance.
type: str
sample: ""
ansible_ec2_instance_identity_document_devpayproductcodes:
description: Product codes for the launched AMI.
type: str
sample: ""
ansible_ec2_instance_identity_document_imageid:
description: The AMI ID used to launch the instance.
type: str
sample: "ami-01234567"
ansible_ec2_instance_identity_document_instanceid:
description: The ID of this instance.
type: str
sample: "i-0123456789abcdef0"
ansible_ec2_instance_identity_document_instancetype:
description: The type of instance.
type: str
sample: "m4.large"
ansible_ec2_instance_identity_document_kernelid:
description: The ID of the kernel launched with this instance, if applicable.
type: str
sample: ""
ansible_ec2_instance_identity_document_pendingtime:
description: The instance pending time.
type: str
sample: "2017-05-11T20:51:20Z"
ansible_ec2_instance_identity_document_privateip:
description:
- The private IPv4 address of the instance.
- In cases where multiple network interfaces are present, this refers to the eth0 device (the device for which the device number is 0).
type: str
sample: "10.0.0.1"
ansible_ec2_instance_identity_document_ramdiskid:
description: The ID of the RAM disk specified at launch time, if applicable.
type: str
sample: ""
ansible_ec2_instance_identity_document_region:
description: The Region in which the instance launched.
type: str
sample: "us-east-1"
ansible_ec2_instance_identity_document_version:
description: Identity document version.
type: str
sample: "2010-08-31"
ansible_ec2_instance_identity_pkcs7:
description: Used to verify the document's authenticity and content against the signature.
type: str
sample: ""
ansible_ec2_instance_identity_rsa2048:
description: Used to verify the document's authenticity and content against the signature.
type: str
sample: ""
ansible_ec2_instance_identity_signature:
description: Data that can be used by other parties to verify its origin and authenticity.
type: str
sample: ""
ansible_ec2_instance_type:
description: The type of instance.
type: str
sample: "m4.large"
ansible_ec2_local_hostname:
description:
- The private IPv4 DNS hostname of the instance.
- In cases where multiple network interfaces are present, this refers to the eth0 device (the device for which the device number is 0).
type: str
sample: "ip-10-0-0-1.ec2.internal"
ansible_ec2_local_ipv4:
description:
- The private IPv4 address of the instance.
- In cases where multiple network interfaces are present, this refers to the eth0 device (the device for which the device number is 0).
type: str
sample: "10.0.0.1"
ansible_ec2_mac:
description:
- The instance's media access control (MAC) address.
- In cases where multiple network interfaces are present, this refers to the eth0 device (the device for which the device number is 0).
type: str
sample: "00:11:22:33:44:55"
ansible_ec2_metrics_vhostmd:
description: Metrics.
type: str
sample: ""
ansible_ec2_network_interfaces_macs_<mac address>_device_number:
description:
- The unique device number associated with that interface. The device number corresponds to the device name;
for example, a device-number of 2 is for the eth2 device.
- This category corresponds to the DeviceIndex and device-index fields that are used by the Amazon EC2 API and the EC2 commands for the AWS CLI.
type: str
sample: "0"
ansible_ec2_network_interfaces_macs_<mac address>_interface_id:
description: The elastic network interface ID.
type: str
sample: "eni-12345678"
ansible_ec2_network_interfaces_macs_<mac address>_ipv4_associations_<ip address>:
description: The private IPv4 addresses that are associated with each public-ip address and assigned to that interface.
type: str
sample: ""
ansible_ec2_network_interfaces_macs_<mac address>_ipv6s:
description: The IPv6 addresses associated with the interface. Returned only for instances launched into a VPC.
type: str
sample: ""
ansible_ec2_network_interfaces_macs_<mac address>_local_hostname:
description: The interface's local hostname.
type: str
sample: ""
ansible_ec2_network_interfaces_macs_<mac address>_local_ipv4s:
description: The private IPv4 addresses associated with the interface.
type: str
sample: ""
ansible_ec2_network_interfaces_macs_<mac address>_mac:
description: The instance's MAC address.
type: str
sample: "00:11:22:33:44:55"
ansible_ec2_network_interfaces_macs_<mac address>_owner_id:
description:
- The ID of the owner of the network interface.
- In multiple-interface environments, an interface can be attached by a third party, such as Elastic Load Balancing.
- Traffic on an interface is always billed to the interface owner.
type: str
sample: "01234567890"
ansible_ec2_network_interfaces_macs_<mac address>_public_hostname:
description:
- The interface's public DNS (IPv4). If the instance is in a VPC,
this category is only returned if the enableDnsHostnames attribute is set to true.
type: str
sample: "ec2-1-2-3-4.compute-1.amazonaws.com"
ansible_ec2_network_interfaces_macs_<mac address>_public_ipv4s:
description: The Elastic IP addresses associated with the interface. There may be multiple IPv4 addresses on an instance.
type: str
sample: "1.2.3.4"
ansible_ec2_network_interfaces_macs_<mac address>_security_group_ids:
description: The IDs of the security groups to which the network interface belongs. Returned only for instances launched into a VPC.
type: str
sample: "sg-01234567,sg-01234568"
ansible_ec2_network_interfaces_macs_<mac address>_security_groups:
description: Security groups to which the network interface belongs. Returned only for instances launched into a VPC.
type: str
sample: "secgroup1,secgroup2"
ansible_ec2_network_interfaces_macs_<mac address>_subnet_id:
description: The ID of the subnet in which the interface resides. Returned only for instances launched into a VPC.
type: str
sample: "subnet-01234567"
ansible_ec2_network_interfaces_macs_<mac address>_subnet_ipv4_cidr_block:
description: The IPv4 CIDR block of the subnet in which the interface resides. Returned only for instances launched into a VPC.
type: str
sample: "10.0.1.0/24"
ansible_ec2_network_interfaces_macs_<mac address>_subnet_ipv6_cidr_blocks:
description: The IPv6 CIDR block of the subnet in which the interface resides. Returned only for instances launched into a VPC.
type: str
sample: ""
ansible_ec2_network_interfaces_macs_<mac address>_vpc_id:
description: The ID of the VPC in which the interface resides. Returned only for instances launched into a VPC.
type: str
sample: "vpc-0123456"
ansible_ec2_network_interfaces_macs_<mac address>_vpc_ipv4_cidr_block:
description: The IPv4 CIDR block of the VPC in which the interface resides. Returned only for instances launched into a VPC.
type: str
sample: "10.0.0.0/16"
ansible_ec2_network_interfaces_macs_<mac address>_vpc_ipv4_cidr_blocks:
description: The IPv4 CIDR block of the VPC in which the interface resides. Returned only for instances launched into a VPC.
type: str
sample: "10.0.0.0/16"
ansible_ec2_network_interfaces_macs_<mac address>_vpc_ipv6_cidr_blocks:
description: The IPv6 CIDR block of the VPC in which the interface resides. Returned only for instances launched into a VPC.
type: str
sample: ""
ansible_ec2_placement_availability_zone:
description: The Availability Zone in which the instance launched.
type: str
sample: "us-east-1a"
ansible_ec2_placement_region:
description: The Region in which the instance launched.
type: str
sample: "us-east-1"
ansible_ec2_product_codes:
description: Product codes associated with the instance, if any.
type: str
sample: "aw0evgkw8e5c1q413zgy5pjce"
ansible_ec2_profile:
description: EC2 instance hardware profile.
type: str
sample: "default-hvm"
ansible_ec2_public_hostname:
description:
- The instance's public DNS. If the instance is in a VPC, this category is only returned if the enableDnsHostnames attribute is set to true.
type: str
sample: "ec2-1-2-3-4.compute-1.amazonaws.com"
ansible_ec2_public_ipv4:
description: The public IPv4 address. If an Elastic IP address is associated with the instance, the value returned is the Elastic IP address.
type: str
sample: "1.2.3.4"
ansible_ec2_public_key:
description: Public key. Only available if supplied at instance launch time.
type: str
sample: ""
ansible_ec2_ramdisk_id:
description: The ID of the RAM disk specified at launch time, if applicable.
type: str
sample: ""
ansible_ec2_reservation_id:
description: The ID of the reservation.
type: str
sample: "r-0123456789abcdef0"
ansible_ec2_security_groups:
description:
- The names of the security groups applied to the instance. After launch, you can only change the security groups of instances running in a VPC.
- Such changes are reflected here and in network/interfaces/macs/mac/security-groups.
type: str
sample: "securitygroup1,securitygroup2"
ansible_ec2_services_domain:
description: The domain for AWS resources for the region; for example, amazonaws.com for us-east-1.
type: str
sample: "amazonaws.com"
ansible_ec2_services_partition:
description:
- The partition that the resource is in. For standard AWS regions, the partition is aws.
- If you have resources in other partitions, the partition is aws-partitionname.
- For example, the partition for resources in the China (Beijing) region is aws-cn.
type: str
sample: "aws"
ansible_ec2_spot_termination_time:
description:
- The approximate time, in UTC, that the operating system for your Spot instance will receive the shutdown signal.
- This item is present and contains a time value only if the Spot instance has been marked for termination by Amazon EC2.
- The termination-time item is not set to a time if you terminated the Spot instance yourself.
type: str
sample: "2015-01-05T18:02:00Z"
ansible_ec2_user_data:
description: The instance user data.
type: str
sample: "#!/bin/bash"
'''
import json
import re
import socket
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_text
from ansible.module_utils.urls import fetch_url
from ansible.module_utils.six.moves.urllib.parse import quote
socket.setdefaulttimeout(5)
class Ec2Metadata(object):
ec2_metadata_uri = 'http://169.254.169.254/latest/meta-data/'
ec2_sshdata_uri = 'http://169.254.169.254/latest/meta-data/public-keys/0/openssh-key'
ec2_userdata_uri = 'http://169.254.169.254/latest/user-data/'
ec2_dynamicdata_uri = 'http://169.254.169.254/latest/dynamic/'
def __init__(self, module, ec2_metadata_uri=None, ec2_sshdata_uri=None, ec2_userdata_uri=None, ec2_dynamicdata_uri=None):
self.module = module
self.uri_meta = ec2_metadata_uri or self.ec2_metadata_uri
self.uri_user = ec2_userdata_uri or self.ec2_userdata_uri
self.uri_ssh = ec2_sshdata_uri or self.ec2_sshdata_uri
self.uri_dynamic = ec2_dynamicdata_uri or self.ec2_dynamicdata_uri
self._data = {}
self._prefix = 'ansible_ec2_%s'
def _fetch(self, url):
encoded_url = quote(url, safe='%/:=&?~#+!$,;\'@()*[]')
response, info = fetch_url(self.module, encoded_url, force=True)
if info.get('status') not in (200, 404):
time.sleep(3)
# request went bad, retry once then raise
self.module.warn('Retrying query to metadata service. First attempt failed: {0}'.format(info['msg']))
response, info = fetch_url(self.module, encoded_url, force=True)
if info.get('status') not in (200, 404):
# fail out now
self.module.fail_json(msg='Failed to retrieve metadata from AWS: {0}'.format(info['msg']), response=info)
if response:
data = response.read()
else:
data = None
return to_text(data)
def _mangle_fields(self, fields, uri, filter_patterns=None):
filter_patterns = ['public-keys-0'] if filter_patterns is None else filter_patterns
new_fields = {}
for key, value in fields.items():
split_fields = key[len(uri):].split('/')
# Parse out the IAM role name (which is _not_ the same as the instance profile name)
if len(split_fields) == 3 and split_fields[0:2] == ['iam', 'security-credentials'] and ':' not in split_fields[2]:
new_fields[self._prefix % "iam-instance-profile-role"] = split_fields[2]
if len(split_fields) > 1 and split_fields[1]:
new_key = "-".join(split_fields)
new_fields[self._prefix % new_key] = value
else:
new_key = "".join(split_fields)
new_fields[self._prefix % new_key] = value
for pattern in filter_patterns:
for key in dict(new_fields):
match = re.search(pattern, key)
if match:
new_fields.pop(key)
return new_fields
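# Illustrative note (not part of the original module): _mangle_fields turns a fetched key such as
# 'http://169.254.169.254/latest/meta-data/placement/availability-zone' into
# 'ansible_ec2_placement-availability-zone' by stripping the base URI and joining the remaining
# path segments with '-'; fix_invalid_varnames() later rewrites this to
# 'ansible_ec2_placement_availability_zone'.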
def fetch(self, uri, recurse=True):
raw_subfields = self._fetch(uri)
if not raw_subfields:
return
subfields = raw_subfields.split('\n')
for field in subfields:
if field.endswith('/') and recurse:
self.fetch(uri + field)
if uri.endswith('/'):
new_uri = uri + field
else:
new_uri = uri + '/' + field
if new_uri not in self._data and not new_uri.endswith('/'):
content = self._fetch(new_uri)
if field == 'security-groups' or field == 'security-group-ids':
sg_fields = ",".join(content.split('\n'))
self._data['%s' % (new_uri)] = sg_fields
else:
try:
json_dict = json.loads(content)
self._data['%s' % (new_uri)] = content
for (key, value) in json_dict.items():
self._data['%s:%s' % (new_uri, key.lower())] = value
except Exception:
self._data['%s' % (new_uri)] = content # content is not JSON
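# Illustrative note (not part of the original module): when the fetched content is JSON (for
# example the dynamic instance-identity document), fetch() stores the raw document under its URI
# plus one extra key per JSON attribute, e.g. '<uri>:region' -> 'us-east-1', which is how facts
# such as ansible_ec2_instance_identity_document_region are produced.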
def fix_invalid_varnames(self, data):
"""Change ':'' and '-' to '_' to ensure valid template variable names"""
new_data = data.copy()
for key, value in data.items():
if ':' in key or '-' in key:
newkey = re.sub(':|-', '_', key)
new_data[newkey] = value
del new_data[key]
return new_data
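# Illustrative example (not part of the original module):
# fix_invalid_varnames({'ansible_ec2_iam-info:lastupdated': '2017-05-12T02:42:27Z'})
# -> {'ansible_ec2_iam_info_lastupdated': '2017-05-12T02:42:27Z'}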
def run(self):
self.fetch(self.uri_meta) # populate _data with metadata
data = self._mangle_fields(self._data, self.uri_meta)
data[self._prefix % 'user-data'] = self._fetch(self.uri_user)
data[self._prefix % 'public-key'] = self._fetch(self.uri_ssh)
self._data = {} # clear out metadata in _data
self.fetch(self.uri_dynamic) # populate _data with dynamic data
dyndata = self._mangle_fields(self._data, self.uri_dynamic)
data.update(dyndata)
data = self.fix_invalid_varnames(data)
# Maintain old key for backwards compatibility
if 'ansible_ec2_instance_identity_document_region' in data:
data['ansible_ec2_placement_region'] = data['ansible_ec2_instance_identity_document_region']
return data
def main():
module = AnsibleModule(
argument_spec={},
supports_check_mode=True,
)
ec2_metadata_facts = Ec2Metadata(module).run()
ec2_metadata_facts_result = dict(changed=False, ansible_facts=ec2_metadata_facts)
module.exit_json(**ec2_metadata_facts_result)
if __name__ == '__main__':
main()

@ -1,336 +0,0 @@
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: ec2_snapshot
short_description: Creates a snapshot from an existing volume
description:
- Creates an EC2 snapshot from an existing EBS volume.
version_added: "1.5"
options:
volume_id:
description:
- Volume from which to take the snapshot.
required: false
type: str
description:
description:
- Description to be applied to the snapshot.
required: false
type: str
instance_id:
description:
- Instance that has the required volume to snapshot mounted.
required: false
type: str
device_name:
description:
- Device name of a mounted volume to be snapshotted.
required: false
type: str
snapshot_tags:
description:
- A dictionary of tags to add to the snapshot.
type: dict
required: false
version_added: "1.6"
wait:
description:
- Wait for the snapshot to be ready.
type: bool
required: false
default: yes
version_added: "1.5.1"
wait_timeout:
description:
- How long before wait gives up, in seconds.
- Specify 0 to wait forever.
required: false
default: 0
version_added: "1.5.1"
type: int
state:
description:
- Whether to create (C(present)) or delete (C(absent)) a snapshot.
required: false
default: present
choices: ['absent', 'present']
version_added: "1.9"
type: str
snapshot_id:
description:
- Snapshot ID to remove.
required: false
version_added: "1.9"
type: str
last_snapshot_min_age:
description:
- If the volume's most recent snapshot was started less than I(last_snapshot_min_age) minutes ago, a new snapshot will not be created.
required: false
default: 0
version_added: "2.0"
type: int
author: "Will Thames (@willthames)"
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Simple snapshot of volume using volume_id
- ec2_snapshot:
volume_id: vol-abcdef12
description: snapshot of /data from DB123 taken 2013/11/28 12:18:32
# Snapshot of volume mounted on device_name attached to instance_id
- ec2_snapshot:
instance_id: i-12345678
device_name: /dev/sdb1
description: snapshot of /data from DB123 taken 2013/11/28 12:18:32
# Snapshot of volume with tagging
- ec2_snapshot:
instance_id: i-12345678
device_name: /dev/sdb1
snapshot_tags:
frequency: hourly
source: /data
# Remove a snapshot
- local_action:
module: ec2_snapshot
snapshot_id: snap-abcd1234
state: absent
# Create a snapshot only if the most recent one is older than 1 hour
- local_action:
module: ec2_snapshot
volume_id: vol-abcdef12
last_snapshot_min_age: 60
'''
RETURN = '''
snapshot_id:
description: The ID of the snapshot. Each snapshot receives a unique identifier when it is created.
type: str
returned: always
sample: snap-01234567
tags:
description: Any tags assigned to the snapshot.
type: dict
returned: always
sample: "{ 'Name': 'instance-name' }"
volume_id:
description: The ID of the volume that was used to create the snapshot.
type: str
returned: always
sample: vol-01234567
volume_size:
description: The size of the volume, in GiB.
type: int
returned: always
sample: 8
'''
import time
import datetime
try:
import boto.exception
except ImportError:
pass # Taken care of by ec2.HAS_BOTO
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import HAS_BOTO, ec2_argument_spec, ec2_connect
# Find the most recent snapshot
def _get_snapshot_starttime(snap):
return datetime.datetime.strptime(snap.start_time, '%Y-%m-%dT%H:%M:%S.%fZ')
def _get_most_recent_snapshot(snapshots, max_snapshot_age_secs=None, now=None):
"""
Gets the most recently created snapshot and optionally filters the result
if the snapshot is too old
:param snapshots: list of snapshots to search
:param max_snapshot_age_secs: filter the result if it is older than this
:param now: simulate time -- used for unit testing
:return:
"""
if len(snapshots) == 0:
return None
if not now:
now = datetime.datetime.utcnow()
youngest_snapshot = max(snapshots, key=_get_snapshot_starttime)
# See if the snapshot is younger than the given max age
snapshot_start = _get_snapshot_starttime(youngest_snapshot)
snapshot_age = now - snapshot_start
if max_snapshot_age_secs is not None:
if snapshot_age.total_seconds() > max_snapshot_age_secs:
return None
return youngest_snapshot
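# Illustrative example (not part of the original module): with max_snapshot_age_secs=3600, a
# snapshot whose start_time is 30 minutes old is returned and reused, while one started two
# hours ago yields None, so a fresh snapshot gets created.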
def _create_with_wait(snapshot, wait_timeout_secs, sleep_func=time.sleep):
"""
Wait for the snapshot to be created
:param snapshot: boto snapshot object to poll
:param wait_timeout_secs: fail this step after this many seconds; 0 waits forever
:param sleep_func: sleep implementation, injectable for unit testing
:return: True if the snapshot completed, False on timeout
"""
time_waited = 0
snapshot.update()
while snapshot.status != 'completed':
sleep_func(3)
snapshot.update()
time_waited += 3
if wait_timeout_secs and time_waited > wait_timeout_secs:
return False
return True
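# Illustrative note (not part of the original module): _create_with_wait polls the snapshot
# status every 3 seconds; with wait_timeout_secs=0 (the module default) it waits indefinitely,
# because the timeout check is skipped when the value is falsy.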
def create_snapshot(module, ec2, state=None, description=None, wait=None,
wait_timeout=None, volume_id=None, instance_id=None,
snapshot_id=None, device_name=None, snapshot_tags=None,
last_snapshot_min_age=None):
snapshot = None
changed = False
required = [volume_id, snapshot_id, instance_id]
if required.count(None) != len(required) - 1: # exactly one must be set
module.fail_json(msg='One and only one of volume_id or instance_id or snapshot_id must be specified')
if (instance_id and not device_name) or (device_name and not instance_id):
module.fail_json(msg='Instance ID and device name must both be specified')
if instance_id:
try:
volumes = ec2.get_all_volumes(filters={'attachment.instance-id': instance_id, 'attachment.device': device_name})
except boto.exception.BotoServerError as e:
module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
if not volumes:
module.fail_json(msg="Could not find volume with name %s attached to instance %s" % (device_name, instance_id))
volume_id = volumes[0].id
if state == 'absent':
if not snapshot_id:
module.fail_json(msg='snapshot_id must be set when state is absent')
try:
ec2.delete_snapshot(snapshot_id)
except boto.exception.BotoServerError as e:
# exception is raised if snapshot does not exist
if e.error_code == 'InvalidSnapshot.NotFound':
module.exit_json(changed=False)
else:
module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
# successful delete
module.exit_json(changed=True)
if last_snapshot_min_age > 0:
try:
current_snapshots = ec2.get_all_snapshots(filters={'volume_id': volume_id})
except boto.exception.BotoServerError as e:
module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
last_snapshot_min_age = last_snapshot_min_age * 60 # Convert to seconds
snapshot = _get_most_recent_snapshot(current_snapshots,
max_snapshot_age_secs=last_snapshot_min_age)
try:
# Create a new snapshot if we didn't find an existing one to use
if snapshot is None:
snapshot = ec2.create_snapshot(volume_id, description=description)
changed = True
if wait:
if not _create_with_wait(snapshot, wait_timeout):
module.fail_json(msg='Timed out while creating snapshot.')
if snapshot_tags:
for k, v in snapshot_tags.items():
snapshot.add_tag(k, v)
except boto.exception.BotoServerError as e:
module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
module.exit_json(changed=changed,
snapshot_id=snapshot.id,
volume_id=snapshot.volume_id,
volume_size=snapshot.volume_size,
tags=snapshot.tags.copy())
def create_snapshot_ansible_module():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
volume_id=dict(),
description=dict(),
instance_id=dict(),
snapshot_id=dict(),
device_name=dict(),
wait=dict(type='bool', default=True),
wait_timeout=dict(type='int', default=0),
last_snapshot_min_age=dict(type='int', default=0),
snapshot_tags=dict(type='dict', default=dict()),
state=dict(choices=['absent', 'present'], default='present'),
)
)
module = AnsibleModule(argument_spec=argument_spec)
return module
def main():
module = create_snapshot_ansible_module()
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
volume_id = module.params.get('volume_id')
snapshot_id = module.params.get('snapshot_id')
description = module.params.get('description')
instance_id = module.params.get('instance_id')
device_name = module.params.get('device_name')
wait = module.params.get('wait')
wait_timeout = module.params.get('wait_timeout')
last_snapshot_min_age = module.params.get('last_snapshot_min_age')
snapshot_tags = module.params.get('snapshot_tags')
state = module.params.get('state')
ec2 = ec2_connect(module)
create_snapshot(
module=module,
state=state,
description=description,
wait=wait,
wait_timeout=wait_timeout,
ec2=ec2,
volume_id=volume_id,
instance_id=instance_id,
snapshot_id=snapshot_id,
device_name=device_name,
snapshot_tags=snapshot_tags,
last_snapshot_min_age=last_snapshot_min_age
)
if __name__ == '__main__':
main()

@ -1,258 +0,0 @@
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ec2_snapshot_info
short_description: Gather information about ec2 volume snapshots in AWS
description:
- Gather information about ec2 volume snapshots in AWS.
- This module was called C(ec2_snapshot_facts) before Ansible 2.9. The usage did not change.
version_added: "2.1"
requirements: [ boto3 ]
author: "Rob White (@wimnat)"
options:
snapshot_ids:
description:
- If you specify one or more snapshot IDs, only snapshots that have the specified IDs are returned.
required: false
default: []
type: list
elements: str
owner_ids:
description:
- If you specify one or more snapshot owners, only snapshots from the specified owners and for which you have
access are returned.
required: false
default: []
type: list
elements: str
restorable_by_user_ids:
description:
- If you specify a list of restorable users, only snapshots with create snapshot permissions for those users are
returned.
required: false
default: []
type: list
elements: str
filters:
description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value. See
U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSnapshots.html) for possible filters. Filter
names and values are case sensitive.
required: false
type: dict
default: {}
notes:
- By default, the module will return all snapshots, including public ones. To limit results to snapshots owned by
the account, use the filter C(owner-id).
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Gather information about all snapshots, including public ones
- ec2_snapshot_info:
# Gather information about all snapshots owned by the account 0123456789
- ec2_snapshot_info:
filters:
owner-id: 0123456789
# Or alternatively...
- ec2_snapshot_info:
owner_ids:
- 0123456789
# Gather information about a particular snapshot using ID
- ec2_snapshot_info:
filters:
snapshot-id: snap-00112233
# Or alternatively...
- ec2_snapshot_info:
snapshot_ids:
- snap-00112233
# Gather information about any snapshot with a tag key Name and value Example
- ec2_snapshot_info:
filters:
"tag:Name": Example
# Gather information about any snapshot with an error status
- ec2_snapshot_info:
filters:
status: error
'''
RETURN = '''
snapshot_id:
description: The ID of the snapshot. Each snapshot receives a unique identifier when it is created.
type: str
returned: always
sample: snap-01234567
volume_id:
description: The ID of the volume that was used to create the snapshot.
type: str
returned: always
sample: vol-01234567
state:
description: The snapshot state (completed, pending or error).
type: str
returned: always
sample: completed
state_message:
description: Encrypted Amazon EBS snapshots are copied asynchronously. If a snapshot copy operation fails (for example, if the proper
AWS Key Management Service (AWS KMS) permissions are not obtained) this field displays error state details to help you diagnose why the
error occurred.
type: str
returned: always
sample:
start_time:
description: The time stamp when the snapshot was initiated.
type: str
returned: always
sample: "2015-02-12T02:14:02+00:00"
progress:
description: The progress of the snapshot, as a percentage.
type: str
returned: always
sample: "100%"
owner_id:
description: The AWS account ID of the EBS snapshot owner.
type: str
returned: always
sample: "099720109477"
description:
description: The description for the snapshot.
type: str
returned: always
sample: "My important backup"
volume_size:
description: The size of the volume, in GiB.
type: int
returned: always
sample: 8
owner_alias:
description: The AWS account alias (for example, amazon, self) or AWS account ID that owns the snapshot.
type: str
returned: always
sample: "033440102211"
tags:
description: Any tags assigned to the snapshot.
type: dict
returned: always
sample: "{ 'my_tag_key': 'my_tag_value' }"
encrypted:
description: Indicates whether the snapshot is encrypted.
type: bool
returned: always
sample: "True"
kms_key_id:
description: The full ARN of the AWS Key Management Service (AWS KMS) customer master key (CMK) that was used to \
protect the volume encryption key for the parent volume.
type: str
returned: always
sample: "74c9742a-a1b2-45cb-b3fe-abcdef123456"
data_encryption_key_id:
description: The data encryption key identifier for the snapshot. This value is a unique identifier that \
corresponds to the data encryption key that was used to encrypt the original volume or snapshot copy.
type: str
returned: always
sample: "arn:aws:kms:ap-southeast-2:012345678900:key/74c9742a-a1b2-45cb-b3fe-abcdef123456"
'''
try:
import boto3
from botocore.exceptions import ClientError
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (ansible_dict_to_boto3_filter_list,
boto3_conn, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict,
ec2_argument_spec, get_aws_connection_info)
def list_ec2_snapshots(connection, module):
snapshot_ids = module.params.get("snapshot_ids")
owner_ids = [str(owner_id) for owner_id in module.params.get("owner_ids")]
restorable_by_user_ids = [str(user_id) for user_id in module.params.get("restorable_by_user_ids")]
filters = ansible_dict_to_boto3_filter_list(module.params.get("filters"))
try:
snapshots = connection.describe_snapshots(SnapshotIds=snapshot_ids, OwnerIds=owner_ids, RestorableByUserIds=restorable_by_user_ids, Filters=filters)
except ClientError as e:
if e.response['Error']['Code'] == "InvalidSnapshot.NotFound":
if len(snapshot_ids) > 1:
module.warn("Some of your snapshots may exist, but %s" % str(e))
snapshots = {'Snapshots': []}
else:
module.fail_json(msg="Failed to describe snapshots: %s" % str(e))
# Turn the boto3 result into ansible_friendly_snaked_names
snaked_snapshots = []
for snapshot in snapshots['Snapshots']:
snaked_snapshots.append(camel_dict_to_snake_dict(snapshot))
# Turn the boto3 result into an Ansible-friendly tag dictionary
for snapshot in snaked_snapshots:
if 'tags' in snapshot:
snapshot['tags'] = boto3_tag_list_to_ansible_dict(snapshot['tags'], 'key', 'value')
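# Illustrative example (not part of the original module):
# boto3_tag_list_to_ansible_dict([{'key': 'Name', 'value': 'db1'}], 'key', 'value')
# -> {'Name': 'db1'}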
module.exit_json(snapshots=snaked_snapshots)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
snapshot_ids=dict(default=[], type='list'),
owner_ids=dict(default=[], type='list'),
restorable_by_user_ids=dict(default=[], type='list'),
filters=dict(default={}, type='dict')
)
)
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=[
['snapshot_ids', 'owner_ids', 'restorable_by_user_ids', 'filters']
]
)
if module._name == 'ec2_snapshot_facts':
module.deprecate("The 'ec2_snapshot_facts' module has been renamed to 'ec2_snapshot_info'", version='2.13')
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
if region:
connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_params)
else:
module.fail_json(msg="region must be specified")
list_ec2_snapshots(connection, module)
if __name__ == '__main__':
main()

@ -1,201 +0,0 @@
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ec2_tag
short_description: create and remove tags on ec2 resources
description:
- Creates, modifies and removes tags for any EC2 resource.
- Resources are referenced by their resource id (for example, an instance being i-XXXXXXX, a VPC being vpc-XXXXXXX).
- This module is designed to be used with complex args (tags), see the examples.
version_added: "1.3"
requirements: [ "boto3", "botocore" ]
options:
resource:
description:
- The EC2 resource id.
required: true
type: str
state:
description:
- Whether the tags should be present or absent on the resource.
- The use of I(state=list) to interrogate the tags of an instance has been
deprecated and will be removed in Ansible 2.14. The C(list)
functionality has been moved to a dedicated module M(ec2_tag_info).
default: present
choices: ['present', 'absent', 'list']
type: str
tags:
description:
- A dictionary of tags to add or remove from the resource.
- If the value provided for a key is not set and I(state=absent), the tag will be removed regardless of its current value.
- Required when I(state=present) or I(state=absent).
type: dict
purge_tags:
description:
- Whether unspecified tags should be removed from the resource.
- Note that when combined with I(state=absent), specified tags with non-matching values are not purged.
type: bool
default: false
version_added: '2.7'
author:
- Lester Wade (@lwade)
- Paul Arthur (@flowerysong)
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
- name: Ensure tags are present on a resource
ec2_tag:
region: eu-west-1
resource: vol-XXXXXX
state: present
tags:
Name: ubervol
env: prod
- name: Ensure all volumes are tagged
ec2_tag:
region: eu-west-1
resource: '{{ item.id }}'
state: present
tags:
Name: dbserver
Env: production
loop: '{{ ec2_vol.volumes }}'
- name: Remove the Env tag
ec2_tag:
region: eu-west-1
resource: i-xxxxxxxxxxxxxxxxx
tags:
Env:
state: absent
- name: Remove the Env tag if it's currently 'development'
ec2_tag:
region: eu-west-1
resource: i-xxxxxxxxxxxxxxxxx
tags:
Env: development
state: absent
- name: Remove all tags except for Name from an instance
ec2_tag:
region: eu-west-1
resource: i-xxxxxxxxxxxxxxxxx
tags:
Name: ''
state: absent
purge_tags: true
'''
RETURN = '''
tags:
description: A dict containing the tags on the resource
returned: always
type: dict
added_tags:
description: A dict of tags that were added to the resource
returned: when tags are added
type: dict
removed_tags:
description: A dict of tags that were removed from the resource
returned: when tags are removed
type: dict
'''
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list, compare_aws_tags
try:
from botocore.exceptions import BotoCoreError, ClientError
except Exception:
pass # Handled by AnsibleAWSModule
def get_tags(ec2, module, resource):
filters = [{'Name': 'resource-id', 'Values': [resource]}]
try:
return boto3_tag_list_to_ansible_dict(ec2.describe_tags(Filters=filters)['Tags'])
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg='Failed to fetch tags for resource {0}'.format(resource))
def main():
argument_spec = dict(
resource=dict(required=True),
tags=dict(type='dict'),
purge_tags=dict(type='bool', default=False),
state=dict(default='present', choices=['present', 'absent', 'list']),
)
required_if = [('state', 'present', ['tags']), ('state', 'absent', ['tags'])]
module = AnsibleAWSModule(argument_spec=argument_spec, required_if=required_if, supports_check_mode=True)
resource = module.params['resource']
tags = module.params['tags']
state = module.params['state']
purge_tags = module.params['purge_tags']
result = {'changed': False}
ec2 = module.client('ec2')
current_tags = get_tags(ec2, module, resource)
if state == 'list':
module.deprecate(
'Using the "list" state has been deprecated. Please use the ec2_tag_info module instead', version='2.14')
module.exit_json(changed=False, tags=current_tags)
add_tags, remove = compare_aws_tags(current_tags, tags, purge_tags=purge_tags)
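# Illustrative example (not part of the original module): with
# current_tags={'Name': 'db1', 'Env': 'dev'}, tags={'Env': 'prod'} and purge_tags=True,
# compare_aws_tags returns ({'Env': 'prod'}, ['Name']) - the tags to (re)set and the tag keys
# eligible for purging.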
remove_tags = {}
if state == 'absent':
for key in tags:
if key in current_tags and (tags[key] is None or current_tags[key] == tags[key]):
remove_tags[key] = current_tags[key]
for key in remove:
remove_tags[key] = current_tags[key]
if remove_tags:
result['changed'] = True
result['removed_tags'] = remove_tags
if not module.check_mode:
try:
ec2.delete_tags(Resources=[resource], Tags=ansible_dict_to_boto3_tag_list(remove_tags))
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg='Failed to remove tags {0} from resource {1}'.format(remove_tags, resource))
if state == 'present' and add_tags:
result['changed'] = True
result['added_tags'] = add_tags
current_tags.update(add_tags)
if not module.check_mode:
try:
ec2.create_tags(Resources=[resource], Tags=ansible_dict_to_boto3_tag_list(add_tags))
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg='Failed to set tags {0} on resource {1}'.format(add_tags, resource))
result['tags'] = get_tags(ec2, module, resource)
module.exit_json(**result)
if __name__ == '__main__':
main()

@ -1,92 +0,0 @@
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ec2_tag_info
short_description: list tags on ec2 resources
description:
- Lists tags for any EC2 resource.
- Resources are referenced by their resource id (for example, an instance being i-XXXXXXX, a VPC being vpc-XXXXXX).
- Resource tags can be managed using the M(ec2_tag) module.
version_added: "2.10"
requirements: [ "boto3", "botocore" ]
options:
resource:
description:
- The EC2 resource id (for example i-XXXXXX or vpc-XXXXXX).
required: true
type: str
author:
- Mark Chappell (@tremble)
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
- name: Retrieve all tags on an instance
ec2_tag_info:
region: eu-west-1
resource: i-xxxxxxxxxxxxxxxxx
register: instance_tags
- name: Retrieve all tags on a VPC
ec2_tag_info:
region: eu-west-1
resource: vpc-xxxxxxxxxxxxxxxxx
register: vpc_tags
'''
RETURN = '''
tags:
description: A dict containing the tags on the resource
returned: always
type: dict
'''
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import boto3_tag_list_to_ansible_dict, AWSRetry
try:
from botocore.exceptions import BotoCoreError, ClientError
except Exception:
pass # Handled by AnsibleAWSModule
@AWSRetry.jittered_backoff()
def get_tags(ec2, module, resource):
filters = [{'Name': 'resource-id', 'Values': [resource]}]
return boto3_tag_list_to_ansible_dict(ec2.describe_tags(Filters=filters)['Tags'])
def main():
argument_spec = dict(
resource=dict(required=True),
)
module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
resource = module.params['resource']
ec2 = module.client('ec2')
try:
current_tags = get_tags(ec2, module, resource)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg='Failed to fetch tags for resource {0}'.format(resource))
module.exit_json(changed=False, tags=current_tags)
if __name__ == '__main__':
main()

@ -1,632 +0,0 @@
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: ec2_vol
short_description: Create and attach a volume, return volume id and device map
description:
- Creates an EBS volume and optionally attaches it to an instance.
- If both I(instance) and I(name) are given and the instance has a device at the device name, then no volume is created and no attachment is made.
- This module has a dependency on python-boto.
version_added: "1.1"
options:
instance:
description:
- Instance ID if you wish to attach the volume. Since 1.9 you can set it to None to detach.
type: str
name:
description:
- Volume Name tag if you wish to attach an existing volume (requires instance).
version_added: "1.6"
type: str
id:
description:
- Volume ID if you wish to attach an existing volume (requires instance) or remove an existing volume.
version_added: "1.6"
type: str
volume_size:
description:
- Size of volume (in GiB) to create.
type: int
volume_type:
description:
- Type of EBS volume; standard (magnetic), gp2 (SSD), io1 (Provisioned IOPS), st1 (Throughput Optimized HDD), sc1 (Cold HDD).
"Standard" is the old EBS default and continues to remain the Ansible default for backwards compatibility.
default: standard
version_added: "1.9"
choices: ['standard', 'gp2', 'io1', 'st1', 'sc1']
type: str
iops:
description:
- The provisioned IOPS you want to associate with this volume (integer).
- By default AWS will set this to 100.
version_added: "1.3"
type: int
encrypted:
description:
- Enable encryption at rest for this volume.
default: false
type: bool
version_added: "1.8"
kms_key_id:
description:
- Specify the id of the KMS key to use.
version_added: "2.3"
type: str
device_name:
description:
- Device ID to override device mapping. Assumes /dev/sdf for Linux/UNIX and /dev/xvdf for Windows.
type: str
delete_on_termination:
description:
- When set to C(true), the volume will be deleted upon instance termination.
type: bool
default: false
version_added: "2.1"
zone:
description:
- Zone in which to create the volume. If unset, uses the zone the instance is in (if set).
aliases: ['availability_zone', 'aws_zone', 'ec2_zone']
type: str
snapshot:
description:
- Snapshot ID on which to base the volume.
version_added: "1.5"
type: str
validate_certs:
description:
- When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
type: bool
default: true
version_added: "1.5"
state:
description:
- Whether to ensure the volume is present or absent, or to list existing volumes (The C(list) option was added in version 1.8).
default: present
choices: ['absent', 'present', 'list']
version_added: "1.6"
type: str
tags:
description:
- tag:value pairs to add to the volume after creation.
default: {}
version_added: "2.3"
type: dict
author: "Lester Wade (@lwade)"
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Simple attachment action
- ec2_vol:
instance: XXXXXX
volume_size: 5
device_name: sdd
# Example using custom iops params
- ec2_vol:
instance: XXXXXX
volume_size: 5
iops: 100
device_name: sdd
# Example using snapshot id
- ec2_vol:
instance: XXXXXX
snapshot: "{{ snapshot }}"
# Playbook example combined with instance launch
- ec2:
keypair: "{{ keypair }}"
image: "{{ image }}"
wait: yes
count: 3
register: ec2
- ec2_vol:
instance: "{{ item.id }}"
volume_size: 5
loop: "{{ ec2.instances }}"
register: ec2_vol
# Example: Launch an instance and then add a volume if not already attached
# * Volume will be created with the given name if not already created.
# * Nothing will happen if the volume is already attached.
# * Requires Ansible 2.0
- ec2:
keypair: "{{ keypair }}"
image: "{{ image }}"
zone: YYYYYY
id: my_instance
wait: yes
count: 1
register: ec2
- ec2_vol:
instance: "{{ item.id }}"
name: my_existing_volume_Name_tag
device_name: /dev/xvdf
loop: "{{ ec2.instances }}"
register: ec2_vol
# Remove a volume
- ec2_vol:
id: vol-XXXXXXXX
state: absent
# Detach a volume (since 1.9)
- ec2_vol:
id: vol-XXXXXXXX
instance: None
# List volumes for an instance
- ec2_vol:
instance: i-XXXXXX
state: list
# Create new volume using SSD storage
- ec2_vol:
instance: XXXXXX
volume_size: 50
volume_type: gp2
device_name: /dev/xvdf
# Attach an existing volume to instance. The volume will be deleted upon instance termination.
- ec2_vol:
instance: XXXXXX
id: XXXXXX
device_name: /dev/sdf
delete_on_termination: yes
'''
RETURN = '''
device:
description: device name of attached volume
returned: when success
type: str
sample: "/def/sdf"
volume_id:
description: the ID of the volume
returned: when success
type: str
sample: "vol-35b333d9"
volume_type:
description: the volume type
returned: when success
type: str
sample: "standard"
volume:
description: a dictionary containing detailed attributes of the volume
returned: when success
type: dict
sample: {
"attachment_set": {
"attach_time": "2015-10-23T00:22:29.000Z",
"deleteOnTermination": "false",
"device": "/dev/sdf",
"instance_id": "i-8356263c",
"status": "attached"
},
"create_time": "2015-10-21T14:36:08.870Z",
"encrypted": false,
"id": "vol-35b333d9",
"iops": null,
"size": 1,
"snapshot_id": "",
"status": "in-use",
"tags": {
"env": "dev"
},
"type": "standard",
"zone": "us-east-1b"
}
'''
import time
from distutils.version import LooseVersion
try:
import boto
import boto.ec2
import boto.exception
from boto.exception import BotoServerError
from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping
except ImportError:
pass # Taken care of by ec2.HAS_BOTO
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (HAS_BOTO, AnsibleAWSError, connect_to_aws, ec2_argument_spec,
get_aws_connection_info)
def get_volume(module, ec2):
name = module.params.get('name')
id = module.params.get('id')
zone = module.params.get('zone')
filters = {}
volume_ids = None
# If no name or id supplied, just try volume creation based on module parameters
if id is None and name is None:
return None
if zone:
filters['availability_zone'] = zone
if name:
filters['tag:Name'] = name
if id:
volume_ids = [id]
try:
vols = ec2.get_all_volumes(volume_ids=volume_ids, filters=filters)
except boto.exception.BotoServerError as e:
module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
if not vols:
if id:
msg = "Could not find the volume with id: %s" % id
if name:
msg += (" and name: %s" % name)
module.fail_json(msg=msg)
else:
return None
if len(vols) > 1:
module.fail_json(msg="Found more than one volume in zone (if specified) with name: %s" % name)
return vols[0]
def get_volumes(module, ec2):
instance = module.params.get('instance')
try:
if not instance:
vols = ec2.get_all_volumes()
else:
vols = ec2.get_all_volumes(filters={'attachment.instance-id': instance})
except boto.exception.BotoServerError as e:
module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
return vols
def delete_volume(module, ec2):
volume_id = module.params['id']
try:
ec2.delete_volume(volume_id)
module.exit_json(changed=True)
except boto.exception.EC2ResponseError as ec2_error:
if ec2_error.code == 'InvalidVolume.NotFound':
module.exit_json(changed=False)
module.fail_json(msg=ec2_error.message)
def boto_supports_volume_encryption():
"""
Check if Boto library supports encryption of EBS volumes (added in 2.29.0)
Returns:
True if the installed boto version is at least 2.29.0, else False
"""
return hasattr(boto, 'Version') and LooseVersion(boto.Version) >= LooseVersion('2.29.0')
def boto_supports_kms_key_id():
"""
Check if Boto library supports kms_key_ids (added in 2.39.0)
Returns:
True if the installed boto version is at least 2.39.0, else False
"""
return hasattr(boto, 'Version') and LooseVersion(boto.Version) >= LooseVersion('2.39.0')
def create_volume(module, ec2, zone):
changed = False
name = module.params.get('name')
iops = module.params.get('iops')
encrypted = module.params.get('encrypted')
kms_key_id = module.params.get('kms_key_id')
volume_size = module.params.get('volume_size')
volume_type = module.params.get('volume_type')
snapshot = module.params.get('snapshot')
tags = module.params.get('tags')
# If custom iops is defined we use volume_type "io1" rather than the default of "standard"
if iops:
volume_type = 'io1'
volume = get_volume(module, ec2)
if volume is None:
try:
if boto_supports_volume_encryption():
if kms_key_id is not None:
volume = ec2.create_volume(volume_size, zone, snapshot, volume_type, iops, encrypted, kms_key_id)
else:
volume = ec2.create_volume(volume_size, zone, snapshot, volume_type, iops, encrypted)
changed = True
else:
volume = ec2.create_volume(volume_size, zone, snapshot, volume_type, iops)
changed = True
while volume.status != 'available':
time.sleep(3)
volume.update()
if name:
tags["Name"] = name
if tags:
ec2.create_tags([volume.id], tags)
except boto.exception.BotoServerError as e:
module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
return volume, changed
def attach_volume(module, ec2, volume, instance):
device_name = module.params.get('device_name')
delete_on_termination = module.params.get('delete_on_termination')
changed = False
# If device_name isn't set, make a choice based on best practices here:
# https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html
# In future this needs to be more dynamic but combining block device mapping best practices
# (bounds for devices, as above) with instance.block_device_mapping data would be tricky. For me ;)
# Use password data attribute to tell whether the instance is Windows or Linux
if device_name is None:
try:
if not ec2.get_password_data(instance.id):
device_name = '/dev/sdf'
else:
device_name = '/dev/xvdf'
except boto.exception.BotoServerError as e:
module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
if volume.attachment_state() is not None:
adata = volume.attach_data
if adata.instance_id != instance.id:
module.fail_json(msg="Volume %s is already attached to another instance: %s"
% (volume.id, adata.instance_id))
else:
# Volume is already attached to right instance
changed = modify_dot_attribute(module, ec2, instance, device_name)
else:
try:
volume.attach(instance.id, device_name)
while volume.attachment_state() != 'attached':
time.sleep(3)
volume.update()
changed = True
except boto.exception.BotoServerError as e:
module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
modify_dot_attribute(module, ec2, instance, device_name)
return volume, changed
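# Illustrative note (not part of the original module): ec2.get_password_data() returns a
# non-empty value only for Windows instances, so the fallback above picks /dev/sdf on
# Linux/UNIX and /dev/xvdf on Windows, matching the documented device_name behaviour.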
def modify_dot_attribute(module, ec2, instance, device_name):
""" Modify delete_on_termination attribute """
delete_on_termination = module.params.get('delete_on_termination')
changed = False
try:
instance.update()
dot = instance.block_device_mapping[device_name].delete_on_termination
except boto.exception.BotoServerError as e:
module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
if delete_on_termination != dot:
try:
bdt = BlockDeviceType(delete_on_termination=delete_on_termination)
bdm = BlockDeviceMapping()
bdm[device_name] = bdt
ec2.modify_instance_attribute(instance_id=instance.id, attribute='blockDeviceMapping', value=bdm)
while instance.block_device_mapping[device_name].delete_on_termination != delete_on_termination:
time.sleep(3)
instance.update()
changed = True
except boto.exception.BotoServerError as e:
module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
return changed
def detach_volume(module, ec2, volume):
changed = False
if volume.attachment_state() is not None:
adata = volume.attach_data
volume.detach()
while volume.attachment_state() is not None:
time.sleep(3)
volume.update()
changed = True
return volume, changed
def get_volume_info(volume, state):
# If we're just listing volumes then do nothing, else get the latest update for the volume
if state != 'list':
volume.update()
attachment = volume.attach_data
volume_info = {
'create_time': volume.create_time,
'encrypted': volume.encrypted,
'id': volume.id,
'iops': volume.iops,
'size': volume.size,
'snapshot_id': volume.snapshot_id,
'status': volume.status,
'type': volume.type,
'zone': volume.zone,
'attachment_set': {
'attach_time': attachment.attach_time,
'device': attachment.device,
'instance_id': attachment.instance_id,
'status': attachment.status
},
'tags': volume.tags
}
if hasattr(attachment, 'deleteOnTermination'):
volume_info['attachment_set']['deleteOnTermination'] = attachment.deleteOnTermination
return volume_info
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
instance=dict(),
id=dict(),
name=dict(),
volume_size=dict(type='int'),
volume_type=dict(choices=['standard', 'gp2', 'io1', 'st1', 'sc1'], default='standard'),
iops=dict(type='int'),
encrypted=dict(type='bool', default=False),
kms_key_id=dict(),
device_name=dict(),
delete_on_termination=dict(type='bool', default=False),
zone=dict(aliases=['availability_zone', 'aws_zone', 'ec2_zone']),
snapshot=dict(),
state=dict(choices=['absent', 'present', 'list'], default='present'),
tags=dict(type='dict', default={})
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
id = module.params.get('id')
name = module.params.get('name')
instance = module.params.get('instance')
volume_size = module.params.get('volume_size')
encrypted = module.params.get('encrypted')
kms_key_id = module.params.get('kms_key_id')
device_name = module.params.get('device_name')
zone = module.params.get('zone')
snapshot = module.params.get('snapshot')
state = module.params.get('state')
tags = module.params.get('tags')
# Ensure we have the zone or can get the zone
if instance is None and zone is None and state == 'present':
module.fail_json(msg="You must specify either instance or zone")
# Set volume detach flag
if instance == 'None' or instance == '':
instance = None
detach_vol_flag = True
else:
detach_vol_flag = False
# Set changed flag
changed = False
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if region:
try:
ec2 = connect_to_aws(boto.ec2, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="region must be specified")
if state == 'list':
returned_volumes = []
vols = get_volumes(module, ec2)
for v in vols:
returned_volumes.append(get_volume_info(v, state))
module.exit_json(changed=False, volumes=returned_volumes)
if encrypted and not boto_supports_volume_encryption():
module.fail_json(msg="You must use boto >= v2.29.0 to use encrypted volumes")
if kms_key_id is not None and not boto_supports_kms_key_id():
module.fail_json(msg="You must use boto >= v2.39.0 to use kms_key_id")
# Here we need to get the zone info for the instance. This covers situation where
# instance is specified but zone isn't.
# Useful for playbooks chaining instance launch with volume create + attach and where the
# zone doesn't matter to the user.
inst = None
if instance:
try:
reservation = ec2.get_all_instances(instance_ids=instance)
except BotoServerError as e:
module.fail_json(msg=e.message)
inst = reservation[0].instances[0]
zone = inst.placement
# Check if there is a volume already mounted there.
if device_name:
if device_name in inst.block_device_mapping:
module.exit_json(msg="Volume mapping for %s already exists on instance %s" % (device_name, instance),
volume_id=inst.block_device_mapping[device_name].volume_id,
device=device_name,
changed=False)
# Delaying the checks until after the instance check allows us to get volume ids for existing volumes
# without needing to pass an unused volume_size
if not volume_size and not (id or name or snapshot):
module.fail_json(msg="You must specify volume_size or identify an existing volume by id, name, or snapshot")
if volume_size and id:
module.fail_json(msg="Cannot specify volume_size together with id")
if state == 'present':
volume, changed = create_volume(module, ec2, zone)
if detach_vol_flag:
volume, changed = detach_volume(module, ec2, volume)
elif inst is not None:
volume, changed = attach_volume(module, ec2, volume, inst)
# Add device, volume_id and volume_type parameters separately to maintain backward compatibility
volume_info = get_volume_info(volume, state)
# deleteOnTermination is not correctly reflected on attachment
if module.params.get('delete_on_termination'):
for attempt in range(0, 8):
if volume_info['attachment_set'].get('deleteOnTermination') == 'true':
break
time.sleep(5)
volume = ec2.get_all_volumes(volume_ids=volume.id)[0]
volume_info = get_volume_info(volume, state)
module.exit_json(changed=changed, volume=volume_info, device=volume_info['attachment_set']['device'],
volume_id=volume_info['id'], volume_type=volume_info['type'])
elif state == 'absent':
delete_volume(module, ec2)
if __name__ == '__main__':
main()

@ -1,141 +0,0 @@
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ec2_vol_info
short_description: Gather information about ec2 volumes in AWS
description:
- Gather information about ec2 volumes in AWS.
- This module was called C(ec2_vol_facts) before Ansible 2.9. The usage did not change.
version_added: "2.1"
requirements: [ boto3 ]
author: "Rob White (@wimnat)"
options:
filters:
type: dict
description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value.
- See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVolumes.html) for possible filters.
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Gather information about all volumes
- ec2_vol_info:
# Gather information about a particular volume using volume ID
- ec2_vol_info:
filters:
volume-id: vol-00112233
# Gather information about any volume with a tag key Name and value Example
- ec2_vol_info:
filters:
"tag:Name": Example
# Gather information about any volume that is attached
- ec2_vol_info:
filters:
attachment.status: attached
'''
# TODO: Disabled the RETURN as it was breaking docs building. Someone needs to
# fix this
RETURN = '''# '''
import traceback
try:
from botocore.exceptions import ClientError
except ImportError:
pass # caught by AnsibleAWSModule
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import AWSRetry
from ansible.module_utils.ec2 import boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_filter_list, camel_dict_to_snake_dict
def get_volume_info(volume, region):
attachment = volume["attachments"]
volume_info = {
'create_time': volume["create_time"],
'id': volume["volume_id"],
'encrypted': volume["encrypted"],
'iops': volume["iops"] if "iops" in volume else None,
'size': volume["size"],
'snapshot_id': volume["snapshot_id"],
'status': volume["state"],
'type': volume["volume_type"],
'zone': volume["availability_zone"],
'region': region,
'attachment_set': {
'attach_time': attachment[0]["attach_time"] if len(attachment) > 0 else None,
'device': attachment[0]["device"] if len(attachment) > 0 else None,
'instance_id': attachment[0]["instance_id"] if len(attachment) > 0 else None,
'status': attachment[0]["state"] if len(attachment) > 0 else None,
'delete_on_termination': attachment[0]["delete_on_termination"] if len(attachment) > 0 else None
},
'tags': boto3_tag_list_to_ansible_dict(volume['tags']) if "tags" in volume else None
}
return volume_info
@AWSRetry.jittered_backoff()
def describe_volumes_with_backoff(connection, filters):
paginator = connection.get_paginator('describe_volumes')
return paginator.paginate(Filters=filters).build_full_result()
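# For reference, build_full_result() above merges every page of the paginated
# API call into a single response. A rough hand-rolled equivalent of that
# aggregation (sketch only; assumes the same boto3 EC2 client):
def _describe_volumes_all_pages(connection, filters):
    volumes = []
    for page in connection.get_paginator('describe_volumes').paginate(Filters=filters):
        volumes.extend(page['Volumes'])
    return {'Volumes': volumes}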
def list_ec2_volumes(connection, module):
# Replace filter key underscores with dashes, for compatibility, except if we're dealing with tags
sanitized_filters = module.params.get("filters")
for key in list(sanitized_filters):
if not key.startswith("tag:"):
sanitized_filters[key.replace("_", "-")] = sanitized_filters.pop(key)
volume_dict_array = []
try:
all_volumes = describe_volumes_with_backoff(connection, ansible_dict_to_boto3_filter_list(sanitized_filters))
except ClientError as e:
module.fail_json_aws(e, msg="Failed to describe volumes.")
for volume in all_volumes["Volumes"]:
volume = camel_dict_to_snake_dict(volume, ignore_list=['Tags'])
volume_dict_array.append(get_volume_info(volume, module.region))
module.exit_json(volumes=volume_dict_array)
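# The underscore-to-dash rewrite in list_ec2_volumes() lets playbooks use
# snake_case filter keys while AWS expects dashes; tag filters are left alone.
# Standalone sketch of that rewrite with hypothetical input, where
# {'volume_id': 'vol-00112233', 'tag:Name': 'Example'} becomes
# {'volume-id': 'vol-00112233', 'tag:Name': 'Example'}:
def _sanitize_filter_keys(filters):
    for key in list(filters):
        if not key.startswith("tag:"):
            filters[key.replace("_", "-")] = filters.pop(key)
    return filters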
def main():
argument_spec = dict(filters=dict(default={}, type='dict'))
module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
if module._name == 'ec2_vol_facts':
module.deprecate("The 'ec2_vol_facts' module has been renamed to 'ec2_vol_info'", version='2.13')
connection = module.client('ec2')
list_ec2_volumes(connection, module)
if __name__ == '__main__':
main()

@ -1,414 +0,0 @@
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: ec2_vpc_dhcp_option
short_description: Manages DHCP Options, and can ensure the DHCP options for the given VPC match what's requested
description:
- This module removes, or creates DHCP option sets, and can associate them to a VPC.
Optionally, a new DHCP Options set can be created that converges a VPC's existing
DHCP option set with values provided.
When dhcp_options_id is provided, the module will
1. remove (with state='absent')
2. ensure tags are applied (if state='present' and tags are provided)
3. attach it to a VPC (if state='present' and a vpc_id is provided)
If any of the optional values are missing, they will be treated
as a no-op (i.e., inherit what already exists for the VPC).
To remove existing options while inheriting, supply an empty value
(e.g. set ntp_servers to [] if you want to remove them from the VPC's options).
Most of the options should be self-explanatory.
author: "Joel Thompson (@joelthompson)"
version_added: 2.1
options:
domain_name:
description:
- The domain name to set in the DHCP option sets
type: str
dns_servers:
description:
- A list of hosts to set the DNS servers for the VPC to. (Should be a
list of IP addresses rather than host names.)
type: list
elements: str
ntp_servers:
description:
- List of hosts to advertise as NTP servers for the VPC.
type: list
elements: str
netbios_name_servers:
description:
- List of hosts to advertise as NetBIOS servers.
type: list
elements: str
netbios_node_type:
description:
- NetBIOS node type to advertise in the DHCP options.
The AWS recommendation is to use 2 (when using netbios name services).
U(https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_DHCP_Options.html)
type: int
vpc_id:
description:
- VPC ID to associate with the requested DHCP option set.
If no vpc id is provided, and no matching option set is found then a new
DHCP option set is created.
type: str
delete_old:
description:
- Whether to delete the old VPC DHCP option set when associating a new one.
This is primarily useful for debugging/development purposes when you
want to quickly roll back to the old option set. Note that this setting
will be ignored, and the old DHCP option set will be preserved, if it
is in use by any other VPC. (Otherwise, AWS will return an error.)
type: bool
default: 'yes'
inherit_existing:
description:
- For any DHCP options not specified in these parameters, whether to
inherit them from the options set already applied to vpc_id, or to
reset them to be empty.
type: bool
default: 'no'
tags:
description:
- Tags to be applied to a VPC options set if a new one is created, or
if the resource_id is provided (the options must match).
aliases: [ 'resource_tags']
version_added: "2.1"
type: dict
dhcp_options_id:
description:
- The resource_id of an existing DHCP options set.
If this is specified, then it will override other settings, except tags
(which will be updated to match)
version_added: "2.1"
type: str
state:
description:
- create/assign or remove the DHCP options.
If state is set to absent, then a DHCP options set matched either
by id, or tags and options will be removed if possible.
default: present
choices: [ 'absent', 'present' ]
version_added: "2.1"
type: str
extends_documentation_fragment:
- aws
- ec2
requirements:
- boto
"""
RETURN = """
new_options:
description: The DHCP options created, associated or found
returned: when appropriate
type: dict
sample:
domain-name-servers:
- 10.0.0.1
- 10.0.1.1
netbios-name-servers:
- 10.0.0.1
- 10.0.1.1
netbios-node-type: 2
domain-name: "my.example.com"
dhcp_options_id:
description: The AWS resource id of the primary DHCP options set created, found or removed
type: str
returned: when available
changed:
description: Whether the dhcp options were changed
type: bool
returned: always
"""
EXAMPLES = """
# Completely overrides the VPC DHCP options associated with VPC vpc-123456 and deletes any existing
# DHCP option set that may have been attached to that VPC.
- ec2_vpc_dhcp_option:
domain_name: "foo.example.com"
region: us-east-1
dns_servers:
- 10.0.0.1
- 10.0.1.1
ntp_servers:
- 10.0.0.2
- 10.0.1.2
netbios_name_servers:
- 10.0.0.1
- 10.0.1.1
netbios_node_type: 2
vpc_id: vpc-123456
delete_old: True
inherit_existing: False
# Ensure the DHCP option set for the VPC has 10.0.0.4 and 10.0.1.4 as the specified DNS servers, but
# keep any other existing settings. Also, keep the old DHCP option set around.
- ec2_vpc_dhcp_option:
region: us-east-1
dns_servers:
- "{{groups['dns-primary']}}"
- "{{groups['dns-secondary']}}"
vpc_id: vpc-123456
inherit_existing: True
delete_old: False
## Create a DHCP option set with 4.4.4.4 and 8.8.8.8 as the specified DNS servers, with tags
## but do not assign to a VPC
- ec2_vpc_dhcp_option:
region: us-east-1
dns_servers:
- 4.4.4.4
- 8.8.8.8
tags:
Name: google servers
Environment: Test
## Delete a DHCP options set that matches the tags and options specified
- ec2_vpc_dhcp_option:
region: us-east-1
dns_servers:
- 4.4.4.4
- 8.8.8.8
tags:
Name: google servers
Environment: Test
state: absent
## Associate a DHCP options set with a VPC by ID
- ec2_vpc_dhcp_option:
region: us-east-1
dhcp_options_id: dopt-12345678
vpc_id: vpc-123456
"""
import collections
import traceback
from time import sleep, time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import HAS_BOTO, connect_to_aws, ec2_argument_spec, get_aws_connection_info
if HAS_BOTO:
import boto.vpc
import boto.ec2
from boto.exception import EC2ResponseError
def get_resource_tags(vpc_conn, resource_id):
return dict((t.name, t.value) for t in vpc_conn.get_all_tags(filters={'resource-id': resource_id}))
def retry_not_found(to_call, *args, **kwargs):
start_time = time()
while time() < start_time + 300:
try:
return to_call(*args, **kwargs)
except EC2ResponseError as e:
if e.error_code in ['InvalidDhcpOptionID.NotFound', 'InvalidDhcpOptionsID.NotFound']:
sleep(3)
continue
raise e
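# retry_not_found() above exists because a freshly created DHCP option set can
# briefly be invisible to other API calls, so NotFound errors are retried for
# up to five minutes. A hypothetical usage sketch (not in the original
# module), reusing the helper to tag a resource that may not be visible yet:
def _tag_when_visible(vpc_conn, resource_id, tags):
    return retry_not_found(vpc_conn.create_tags, resource_id, tags)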
def ensure_tags(module, vpc_conn, resource_id, tags, add_only, check_mode):
try:
cur_tags = get_resource_tags(vpc_conn, resource_id)
if tags == cur_tags:
return {'changed': False, 'tags': cur_tags}
to_delete = dict((k, cur_tags[k]) for k in cur_tags if k not in tags)
if to_delete and not add_only:
retry_not_found(vpc_conn.delete_tags, resource_id, to_delete, dry_run=check_mode)
to_add = dict((k, tags[k]) for k in tags if k not in cur_tags)
if to_add:
retry_not_found(vpc_conn.create_tags, resource_id, to_add, dry_run=check_mode)
latest_tags = get_resource_tags(vpc_conn, resource_id)
return {'changed': True, 'tags': latest_tags}
except EC2ResponseError as e:
module.fail_json(msg="Failed to modify tags: %s" % e.message, exception=traceback.format_exc())
def fetch_dhcp_options_for_vpc(vpc_conn, vpc_id):
"""
Returns the DHCP options object currently associated with the requested VPC ID using the VPC
connection variable.
"""
vpcs = vpc_conn.get_all_vpcs(vpc_ids=[vpc_id])
if len(vpcs) != 1 or vpcs[0].dhcp_options_id == "default":
return None
dhcp_options = vpc_conn.get_all_dhcp_options(dhcp_options_ids=[vpcs[0].dhcp_options_id])
if len(dhcp_options) != 1:
return None
return dhcp_options[0]
def match_dhcp_options(vpc_conn, tags=None, options=None):
"""
Finds a DHCP Options object that optionally matches the tags and options provided
"""
dhcp_options = vpc_conn.get_all_dhcp_options()
for dopts in dhcp_options:
if (not tags) or get_resource_tags(vpc_conn, dopts.id) == tags:
if (not options) or dopts.options == options:
return (True, dopts)
return (False, None)
def remove_dhcp_options_by_id(vpc_conn, dhcp_options_id):
associations = vpc_conn.get_all_vpcs(filters={'dhcpOptionsId': dhcp_options_id})
if len(associations) > 0:
return False
else:
vpc_conn.delete_dhcp_options(dhcp_options_id)
return True
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
dhcp_options_id=dict(type='str', default=None),
domain_name=dict(type='str', default=None),
dns_servers=dict(type='list', default=None),
ntp_servers=dict(type='list', default=None),
netbios_name_servers=dict(type='list', default=None),
netbios_node_type=dict(type='int', default=None),
vpc_id=dict(type='str', default=None),
delete_old=dict(type='bool', default=True),
inherit_existing=dict(type='bool', default=False),
tags=dict(type='dict', default=None, aliases=['resource_tags']),
state=dict(type='str', default='present', choices=['present', 'absent'])
)
)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
params = module.params
found = False
changed = False
new_options = collections.defaultdict(lambda: None)
if not HAS_BOTO:
module.fail_json(msg='boto is required for this module')
region, ec2_url, boto_params = get_aws_connection_info(module)
connection = connect_to_aws(boto.vpc, region, **boto_params)
existing_options = None
# First check if we were given a dhcp_options_id
if not params['dhcp_options_id']:
# No, so create new_options from the parameters
if params['dns_servers'] is not None:
new_options['domain-name-servers'] = params['dns_servers']
if params['netbios_name_servers'] is not None:
new_options['netbios-name-servers'] = params['netbios_name_servers']
if params['ntp_servers'] is not None:
new_options['ntp-servers'] = params['ntp_servers']
if params['domain_name'] is not None:
# needs to be a list for comparison with boto objects later
new_options['domain-name'] = [params['domain_name']]
if params['netbios_node_type'] is not None:
# needs to be a list for comparison with boto objects later
new_options['netbios-node-type'] = [str(params['netbios_node_type'])]
# If we were given a vpc_id then we need to look at the options on that
if params['vpc_id']:
existing_options = fetch_dhcp_options_for_vpc(connection, params['vpc_id'])
# if we've been asked to inherit existing options, do that now
if params['inherit_existing']:
if existing_options:
for option in ['domain-name-servers', 'netbios-name-servers', 'ntp-servers', 'domain-name', 'netbios-node-type']:
if existing_options.options.get(option) and new_options[option] != [] and (not new_options[option] or [''] == new_options[option]):
new_options[option] = existing_options.options.get(option)
# Do the vpc's dhcp options already match what we're asked for? if so we are done
if existing_options and new_options == existing_options.options:
module.exit_json(changed=changed, new_options=new_options, dhcp_options_id=existing_options.id)
# If no vpc_id was given, or the options don't match then look for an existing set using tags
found, dhcp_option = match_dhcp_options(connection, params['tags'], new_options)
# Now let's cover the case where there are existing options that we were told about by id
# If a dhcp_options_id was supplied we don't look at options inside, just set tags (if given)
else:
supplied_options = connection.get_all_dhcp_options(filters={'dhcp-options-id': params['dhcp_options_id']})
if len(supplied_options) != 1:
if params['state'] != 'absent':
module.fail_json(msg=" a dhcp_options_id was supplied, but does not exist")
else:
found = True
dhcp_option = supplied_options[0]
if params['state'] != 'absent' and params['tags']:
ensure_tags(module, connection, dhcp_option.id, params['tags'], False, module.check_mode)
# Now we have the dhcp options set, let's do the necessary
# if we found options we were asked to remove then try to do so
if params['state'] == 'absent':
if not module.check_mode:
if found:
changed = remove_dhcp_options_by_id(connection, dhcp_option.id)
module.exit_json(changed=changed, new_options={})
# otherwise if we haven't found the required options we have something to do
elif not module.check_mode and not found:
# create some dhcp options if we weren't able to use existing ones
if not found:
# Convert netbios-node-type and domain-name back to strings
if new_options['netbios-node-type']:
new_options['netbios-node-type'] = new_options['netbios-node-type'][0]
if new_options['domain-name']:
new_options['domain-name'] = new_options['domain-name'][0]
# create the new dhcp options set requested
dhcp_option = connection.create_dhcp_options(
new_options['domain-name'],
new_options['domain-name-servers'],
new_options['ntp-servers'],
new_options['netbios-name-servers'],
new_options['netbios-node-type'])
# wait for dhcp option to be accessible
found_dhcp_opt = False
start_time = time()
try:
found_dhcp_opt = retry_not_found(connection.get_all_dhcp_options, dhcp_options_ids=[dhcp_option.id])
except EC2ResponseError as e:
module.fail_json(msg="Failed to describe DHCP options", exception=traceback.format_exc)
if not found_dhcp_opt:
module.fail_json(msg="Failed to wait for {0} to be available.".format(dhcp_option.id))
changed = True
if params['tags']:
ensure_tags(module, connection, dhcp_option.id, params['tags'], False, module.check_mode)
# If we were given a vpc_id, then attach the options we now have to that before we finish
if params['vpc_id'] and not module.check_mode:
changed = True
connection.associate_dhcp_options(dhcp_option.id, params['vpc_id'])
# and remove old ones if that was requested
if params['delete_old'] and existing_options:
remove_dhcp_options_by_id(connection, existing_options.id)
module.exit_json(changed=changed, new_options=new_options, dhcp_options_id=dhcp_option.id)
if __name__ == "__main__":
main()
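# Sketch (not part of the original module) of why main() stores single-valued
# options as one-element lists: boto's DhcpOptions.options maps every key to a
# list, so wrapping 'domain-name' and 'netbios-node-type' the same way lets
# the two dicts be compared directly. Values here are hypothetical:
def _normalized_options(domain_name='example.com', node_type=2):
    import collections
    opts = collections.defaultdict(lambda: None)
    opts['domain-name'] = [domain_name]
    opts['netbios-node-type'] = [str(node_type)]
    return opts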

@ -1,157 +0,0 @@
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ec2_vpc_dhcp_option_info
short_description: Gather information about dhcp options sets in AWS
description:
- Gather information about dhcp options sets in AWS
- This module was called C(ec2_vpc_dhcp_option_facts) before Ansible 2.9. The usage did not change.
version_added: "2.2"
requirements: [ boto3 ]
author: "Nick Aslanidis (@naslanidis)"
options:
filters:
description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value.
See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeDhcpOptions.html) for possible filters.
type: dict
dhcp_options_ids:
description:
- Get details of specific DHCP Option IDs.
aliases: ['DhcpOptionIds']
type: list
elements: str
dry_run:
description:
- Checks whether you have the required permissions to view the DHCP
Options.
aliases: ['DryRun']
version_added: "2.4"
type: bool
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: Gather information about all DHCP Option sets for an account or profile
ec2_vpc_dhcp_option_info:
region: ap-southeast-2
profile: production
register: dhcp_info
- name: Gather information about a filtered list of DHCP Option sets
ec2_vpc_dhcp_option_info:
region: ap-southeast-2
profile: production
filters:
"tag:Name": "abc-123"
register: dhcp_info
- name: Gather information about a specific DHCP Option set by DhcpOptionId
ec2_vpc_dhcp_option_info:
region: ap-southeast-2
profile: production
dhcp_options_ids: dopt-123fece2
register: dhcp_info
'''
RETURN = '''
dhcp_options:
description: The dhcp option sets for the account
returned: always
type: list
changed:
description: True if listing the dhcp options succeeds
type: bool
returned: always
'''
import traceback
try:
import botocore
except ImportError:
pass # caught by imported HAS_BOTO3
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (ec2_argument_spec, boto3_conn, HAS_BOTO3,
ansible_dict_to_boto3_filter_list, get_aws_connection_info,
camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict)
def get_dhcp_options_info(dhcp_option):
dhcp_option_info = {'DhcpOptionsId': dhcp_option['DhcpOptionsId'],
'DhcpConfigurations': dhcp_option['DhcpConfigurations'],
'Tags': boto3_tag_list_to_ansible_dict(dhcp_option.get('Tags', [{'Value': '', 'Key': 'Name'}]))}
return dhcp_option_info
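# boto3 returns tags as a list of {'Key': ..., 'Value': ...} dicts and
# boto3_tag_list_to_ansible_dict flattens them into a plain mapping. Sketch of
# the simple Key/Value case, where [{'Key': 'Name', 'Value': 'prod'}] becomes
# {'Name': 'prod'} ('prod' is a hypothetical value):
def _tags_as_dict(tag_list):
    return dict((t['Key'], t['Value']) for t in tag_list)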
def list_dhcp_options(client, module):
params = dict(Filters=ansible_dict_to_boto3_filter_list(module.params.get('filters')))
if module.params.get("dry_run"):
params['DryRun'] = True
if module.params.get("dhcp_options_ids"):
params['DhcpOptionsIds'] = module.params.get("dhcp_options_ids")
try:
all_dhcp_options = client.describe_dhcp_options(**params)
except botocore.exceptions.ClientError as e:
module.fail_json(msg=str(e), exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
return [camel_dict_to_snake_dict(get_dhcp_options_info(option))
for option in all_dhcp_options['DhcpOptions']]
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
filters=dict(type='dict', default={}),
dry_run=dict(type='bool', default=False, aliases=['DryRun']),
dhcp_options_ids=dict(type='list', aliases=['DhcpOptionIds'])
)
)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
if module._name == 'ec2_vpc_dhcp_option_facts':
module.deprecate("The 'ec2_vpc_dhcp_option_facts' module has been renamed to 'ec2_vpc_dhcp_option_info'", version='2.13')
# Validate Requirements
if not HAS_BOTO3:
module.fail_json(msg='boto3 and botocore are required.')
try:
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs)
except botocore.exceptions.NoCredentialsError as e:
module.fail_json(msg="Can't authorize connection - " + str(e))
# call your function here
results = list_dhcp_options(connection, module)
module.exit_json(dhcp_options=results)
if __name__ == '__main__':
main()

@ -1,524 +0,0 @@
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: ec2_vpc_net
short_description: Configure AWS virtual private clouds
description:
- Create, modify, and terminate AWS virtual private clouds.
version_added: "2.0"
author:
- Jonathan Davila (@defionscode)
- Sloane Hertel (@s-hertel)
options:
name:
description:
- The name to give your VPC. This is used in combination with C(cidr_block) to determine if a VPC already exists.
required: yes
type: str
cidr_block:
description:
- The primary CIDR of the VPC. After 2.5 a list of CIDRs can be provided. The first in the list will be used as the primary CIDR
and is used in conjunction with the C(name) to ensure idempotence.
required: yes
type: list
elements: str
ipv6_cidr:
description:
- Request an Amazon-provided IPv6 CIDR block with /56 prefix length. You cannot specify the range of IPv6 addresses,
or the size of the CIDR block.
default: False
type: bool
version_added: '2.10'
purge_cidrs:
description:
- Remove CIDRs that are associated with the VPC and are not specified in C(cidr_block).
default: no
type: bool
version_added: '2.5'
tenancy:
description:
- Whether to be default or dedicated tenancy. This cannot be changed after the VPC has been created.
default: default
choices: [ 'default', 'dedicated' ]
type: str
dns_support:
description:
- Whether to enable AWS DNS support.
default: yes
type: bool
dns_hostnames:
description:
- Whether to enable AWS hostname support.
default: yes
type: bool
dhcp_opts_id:
description:
- The id of the DHCP options to use for this VPC.
type: str
tags:
description:
- The tags you want attached to the VPC. This is independent of the name value; note that if you pass a 'Name' key it will override the Name of
the VPC if it's different.
aliases: [ 'resource_tags' ]
type: dict
state:
description:
- The state of the VPC. Either absent or present.
default: present
choices: [ 'present', 'absent' ]
type: str
multi_ok:
description:
- By default the module will not create another VPC if there is another VPC with the same name and CIDR block. Specify this as true if you want
duplicate VPCs created.
type: bool
default: false
requirements:
- boto3
- botocore
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: create a VPC with dedicated tenancy and a couple of tags
ec2_vpc_net:
name: Module_dev2
cidr_block: 10.10.0.0/16
region: us-east-1
tags:
module: ec2_vpc_net
this: works
tenancy: dedicated
- name: create a VPC with dedicated tenancy and request an IPv6 CIDR
ec2_vpc_net:
name: Module_dev2
cidr_block: 10.10.0.0/16
ipv6_cidr: True
region: us-east-1
tenancy: dedicated
'''
RETURN = '''
vpc:
description: info about the VPC that was created or deleted
returned: always
type: complex
contains:
cidr_block:
description: The CIDR of the VPC
returned: always
type: str
sample: 10.0.0.0/16
cidr_block_association_set:
description: IPv4 CIDR blocks associated with the VPC
returned: success
type: list
sample:
"cidr_block_association_set": [
{
"association_id": "vpc-cidr-assoc-97aeeefd",
"cidr_block": "20.0.0.0/24",
"cidr_block_state": {
"state": "associated"
}
}
]
classic_link_enabled:
description: indicates whether ClassicLink is enabled
returned: always
type: bool
sample: false
dhcp_options_id:
description: the id of the DHCP options associated with this VPC
returned: always
type: str
sample: dopt-0fb8bd6b
id:
description: VPC resource id
returned: always
type: str
sample: vpc-c2e00da5
instance_tenancy:
description: indicates whether VPC uses default or dedicated tenancy
returned: always
type: str
sample: default
ipv6_cidr_block_association_set:
description: IPv6 CIDR blocks associated with the VPC
returned: success
type: list
sample:
"ipv6_cidr_block_association_set": [
{
"association_id": "vpc-cidr-assoc-97aeeefd",
"ipv6_cidr_block": "2001:db8::/56",
"ipv6_cidr_block_state": {
"state": "associated"
}
}
]
is_default:
description: indicates whether this is the default VPC
returned: always
type: bool
sample: false
state:
description: state of the VPC
returned: always
type: str
sample: available
tags:
description: tags attached to the VPC, includes name
returned: always
type: complex
contains:
Name:
description: name tag for the VPC
returned: always
type: str
sample: pk_vpc4
'''
try:
import botocore
except ImportError:
pass # Handled by AnsibleAWSModule
from time import sleep, time
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import (AWSRetry, camel_dict_to_snake_dict, compare_aws_tags,
ansible_dict_to_boto3_tag_list, boto3_tag_list_to_ansible_dict)
from ansible.module_utils.six import string_types
from ansible.module_utils._text import to_native
from ansible.module_utils.network.common.utils import to_subnet
def vpc_exists(module, vpc, name, cidr_block, multi):
"""Returns None or a vpc object depending on the existence of a VPC. When supplied
with a CIDR, it will check for matching tags to determine if it is a match
otherwise it will assume the VPC does not exist and thus return None.
"""
try:
matching_vpcs = vpc.describe_vpcs(Filters=[{'Name': 'tag:Name', 'Values': [name]}, {'Name': 'cidr-block', 'Values': cidr_block}])['Vpcs']
# If an exact matching using a list of CIDRs isn't found, check for a match with the first CIDR as is documented for C(cidr_block)
if not matching_vpcs:
matching_vpcs = vpc.describe_vpcs(Filters=[{'Name': 'tag:Name', 'Values': [name]}, {'Name': 'cidr-block', 'Values': [cidr_block[0]]}])['Vpcs']
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed to describe VPCs")
if multi:
return None
elif len(matching_vpcs) == 1:
return matching_vpcs[0]['VpcId']
elif len(matching_vpcs) > 1:
module.fail_json(msg='Currently there are %d VPCs that have the same name and '
'CIDR block you specified. If you would like to create '
'the VPC anyway please pass True to the multi_ok param.' % len(matching_vpcs))
return None
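# For reference, vpc_exists() above matches on the Name tag plus the CIDR
# list, falling back to the primary CIDR alone. A sketch of the boto3 filter
# shape it sends (values borrowed from the EXAMPLES section):
_EXAMPLE_VPC_FILTERS = [
    {'Name': 'tag:Name', 'Values': ['Module_dev2']},
    {'Name': 'cidr-block', 'Values': ['10.10.0.0/16']},
]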
@AWSRetry.backoff(delay=3, tries=8, catch_extra_error_codes=['InvalidVpcID.NotFound'])
def get_classic_link_with_backoff(connection, vpc_id):
try:
return connection.describe_vpc_classic_link(VpcIds=[vpc_id])['Vpcs'][0].get('ClassicLinkEnabled')
except botocore.exceptions.ClientError as e:
if e.response["Error"]["Message"] == "The functionality you requested is not available in this region.":
return False
else:
raise
def get_vpc(module, connection, vpc_id):
# wait for vpc to be available
try:
connection.get_waiter('vpc_available').wait(VpcIds=[vpc_id])
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Unable to wait for VPC {0} to be available.".format(vpc_id))
try:
vpc_obj = connection.describe_vpcs(VpcIds=[vpc_id], aws_retry=True)['Vpcs'][0]
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed to describe VPCs")
try:
vpc_obj['ClassicLinkEnabled'] = get_classic_link_with_backoff(connection, vpc_id)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed to describe VPCs")
return vpc_obj
def update_vpc_tags(connection, module, vpc_id, tags, name):
if tags is None:
tags = dict()
tags.update({'Name': name})
tags = dict((k, to_native(v)) for k, v in tags.items())
try:
current_tags = dict((t['Key'], t['Value']) for t in connection.describe_tags(Filters=[{'Name': 'resource-id', 'Values': [vpc_id]}])['Tags'])
tags_to_update, dummy = compare_aws_tags(current_tags, tags, False)
if tags_to_update:
if not module.check_mode:
tags = ansible_dict_to_boto3_tag_list(tags_to_update)
vpc_obj = connection.create_tags(Resources=[vpc_id], Tags=tags, aws_retry=True)
# Wait for tags to be updated
expected_tags = boto3_tag_list_to_ansible_dict(tags)
filters = [{'Name': 'tag:{0}'.format(key), 'Values': [value]} for key, value in expected_tags.items()]
connection.get_waiter('vpc_available').wait(VpcIds=[vpc_id], Filters=filters)
return True
else:
return False
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed to update tags")
def update_dhcp_opts(connection, module, vpc_obj, dhcp_id):
if vpc_obj['DhcpOptionsId'] != dhcp_id:
if not module.check_mode:
try:
connection.associate_dhcp_options(DhcpOptionsId=dhcp_id, VpcId=vpc_obj['VpcId'])
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed to associate DhcpOptionsId {0}".format(dhcp_id))
try:
# Wait for DhcpOptionsId to be updated
filters = [{'Name': 'dhcp-options-id', 'Values': [dhcp_id]}]
connection.get_waiter('vpc_available').wait(VpcIds=[vpc_obj['VpcId']], Filters=filters)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json(msg="Failed to wait for DhcpOptionsId to be updated")
return True
else:
return False
def create_vpc(connection, module, cidr_block, tenancy):
try:
if not module.check_mode:
vpc_obj = connection.create_vpc(CidrBlock=cidr_block, InstanceTenancy=tenancy)
else:
module.exit_json(changed=True)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, "Failed to create the VPC")
# wait for vpc to exist
try:
connection.get_waiter('vpc_exists').wait(VpcIds=[vpc_obj['Vpc']['VpcId']])
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Unable to wait for VPC {0} to be created.".format(vpc_obj['Vpc']['VpcId']))
return vpc_obj['Vpc']['VpcId']
def wait_for_vpc_attribute(connection, module, vpc_id, attribute, expected_value):
start_time = time()
updated = False
while time() < start_time + 300:
current_value = connection.describe_vpc_attribute(
Attribute=attribute,
VpcId=vpc_id
)['{0}{1}'.format(attribute[0].upper(), attribute[1:])]['Value']
if current_value != expected_value:
sleep(3)
else:
updated = True
break
if not updated:
module.fail_json(msg="Failed to wait for {0} to be updated".format(attribute))
def get_cidr_network_bits(module, cidr_block):
fixed_cidrs = []
for cidr in cidr_block:
split_addr = cidr.split('/')
if len(split_addr) == 2:
# this_ip is a IPv4 CIDR that may or may not have host bits set
# Get the network bits.
valid_cidr = to_subnet(split_addr[0], split_addr[1])
if cidr != valid_cidr:
module.warn("One of your CIDR addresses ({0}) has host bits set. To get rid of this warning, "
"check the network mask and make sure that only network bits are set: {1}.".format(cidr, valid_cidr))
fixed_cidrs.append(valid_cidr)
else:
# let AWS handle invalid CIDRs
fixed_cidrs.append(cidr)
return fixed_cidrs
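# get_cidr_network_bits() relies on to_subnet() to strip host bits from a
# CIDR. A sketch of that normalization with a hypothetical address, where
# '10.1.2.3/24' comes back as '10.1.2.0/24' (and triggers the warning above):
def _network_cidr(cidr):
    addr, prefix = cidr.split('/')
    return to_subnet(addr, prefix)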
def main():
argument_spec = dict(
name=dict(required=True),
cidr_block=dict(type='list', required=True),
ipv6_cidr=dict(type='bool', default=False),
tenancy=dict(choices=['default', 'dedicated'], default='default'),
dns_support=dict(type='bool', default=True),
dns_hostnames=dict(type='bool', default=True),
dhcp_opts_id=dict(),
tags=dict(type='dict', aliases=['resource_tags']),
state=dict(choices=['present', 'absent'], default='present'),
multi_ok=dict(type='bool', default=False),
purge_cidrs=dict(type='bool', default=False),
)
module = AnsibleAWSModule(
argument_spec=argument_spec,
supports_check_mode=True
)
name = module.params.get('name')
cidr_block = get_cidr_network_bits(module, module.params.get('cidr_block'))
ipv6_cidr = module.params.get('ipv6_cidr')
purge_cidrs = module.params.get('purge_cidrs')
tenancy = module.params.get('tenancy')
dns_support = module.params.get('dns_support')
dns_hostnames = module.params.get('dns_hostnames')
dhcp_id = module.params.get('dhcp_opts_id')
tags = module.params.get('tags')
state = module.params.get('state')
multi = module.params.get('multi_ok')
changed = False
connection = module.client(
'ec2',
retry_decorator=AWSRetry.jittered_backoff(
retries=8, delay=3, catch_extra_error_codes=['InvalidVpcID.NotFound']
)
)
if dns_hostnames and not dns_support:
module.fail_json(msg='In order to enable DNS Hostnames you must also enable DNS support')
if state == 'present':
# Check if VPC exists
vpc_id = vpc_exists(module, connection, name, cidr_block, multi)
if vpc_id is None:
vpc_id = create_vpc(connection, module, cidr_block[0], tenancy)
changed = True
vpc_obj = get_vpc(module, connection, vpc_id)
associated_cidrs = dict((cidr['CidrBlock'], cidr['AssociationId']) for cidr in vpc_obj.get('CidrBlockAssociationSet', [])
if cidr['CidrBlockState']['State'] != 'disassociated')
to_add = [cidr for cidr in cidr_block if cidr not in associated_cidrs]
to_remove = [associated_cidrs[cidr] for cidr in associated_cidrs if cidr not in cidr_block]
expected_cidrs = [cidr for cidr in associated_cidrs if associated_cidrs[cidr] not in to_remove] + to_add
if len(cidr_block) > 1:
for cidr in to_add:
changed = True
try:
connection.associate_vpc_cidr_block(CidrBlock=cidr, VpcId=vpc_id)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, "Unable to associate CIDR {0}.".format(ipv6_cidr))
if ipv6_cidr:
if 'Ipv6CidrBlockAssociationSet' in vpc_obj.keys():
module.warn("Only one IPv6 CIDR is permitted per VPC, {0} already has CIDR {1}".format(
vpc_id,
vpc_obj['Ipv6CidrBlockAssociationSet'][0]['Ipv6CidrBlock']))
else:
try:
connection.associate_vpc_cidr_block(AmazonProvidedIpv6CidrBlock=ipv6_cidr, VpcId=vpc_id)
changed = True
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, "Unable to associate CIDR {0}.".format(ipv6_cidr))
if purge_cidrs:
for association_id in to_remove:
changed = True
try:
connection.disassociate_vpc_cidr_block(AssociationId=association_id)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, "Unable to disassociate {0}. You must detach or delete all gateways and resources that "
"are associated with the CIDR block before you can disassociate it.".format(association_id))
if dhcp_id is not None:
try:
if update_dhcp_opts(connection, module, vpc_obj, dhcp_id):
changed = True
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, "Failed to update DHCP options")
if tags is not None or name is not None:
try:
if update_vpc_tags(connection, module, vpc_id, tags, name):
changed = True
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed to update tags")
current_dns_enabled = connection.describe_vpc_attribute(Attribute='enableDnsSupport', VpcId=vpc_id, aws_retry=True)['EnableDnsSupport']['Value']
current_dns_hostnames = connection.describe_vpc_attribute(Attribute='enableDnsHostnames', VpcId=vpc_id, aws_retry=True)['EnableDnsHostnames']['Value']
if current_dns_enabled != dns_support:
changed = True
if not module.check_mode:
try:
connection.modify_vpc_attribute(VpcId=vpc_id, EnableDnsSupport={'Value': dns_support})
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, "Failed to update enabled dns support attribute")
if current_dns_hostnames != dns_hostnames:
changed = True
if not module.check_mode:
try:
connection.modify_vpc_attribute(VpcId=vpc_id, EnableDnsHostnames={'Value': dns_hostnames})
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, "Failed to update enabled dns hostnames attribute")
# wait for associated cidrs to match
if to_add or to_remove:
try:
connection.get_waiter('vpc_available').wait(
VpcIds=[vpc_id],
Filters=[{'Name': 'cidr-block-association.cidr-block', 'Values': expected_cidrs}]
)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, "Failed to wait for CIDRs to update")
# try to wait for enableDnsSupport and enableDnsHostnames to match
wait_for_vpc_attribute(connection, module, vpc_id, 'enableDnsSupport', dns_support)
wait_for_vpc_attribute(connection, module, vpc_id, 'enableDnsHostnames', dns_hostnames)
final_state = camel_dict_to_snake_dict(get_vpc(module, connection, vpc_id))
final_state['tags'] = boto3_tag_list_to_ansible_dict(final_state.get('tags', []))
final_state['id'] = final_state.pop('vpc_id')
module.exit_json(changed=changed, vpc=final_state)
elif state == 'absent':
# Check if VPC exists
vpc_id = vpc_exists(module, connection, name, cidr_block, multi)
if vpc_id is not None:
try:
if not module.check_mode:
connection.delete_vpc(VpcId=vpc_id)
changed = True
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed to delete VPC {0} You may want to use the ec2_vpc_subnet, ec2_vpc_igw, "
"and/or ec2_vpc_route_table modules to ensure the other components are absent.".format(vpc_id))
module.exit_json(changed=changed, vpc={})
if __name__ == '__main__':
main()

@ -1,306 +0,0 @@
#!/usr/bin/python
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: ec2_vpc_net_info
short_description: Gather information about ec2 VPCs in AWS
description:
- Gather information about ec2 VPCs in AWS
- This module was called C(ec2_vpc_net_facts) before Ansible 2.9. The usage did not change.
version_added: "2.1"
author: "Rob White (@wimnat)"
requirements:
- boto3
- botocore
options:
vpc_ids:
description:
- A list of VPC IDs that exist in your account.
version_added: "2.5"
type: list
elements: str
filters:
description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value.
See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcs.html) for possible filters.
type: dict
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Gather information about all VPCs
- ec2_vpc_net_info:
# Gather information about a particular VPC using VPC ID
- ec2_vpc_net_info:
vpc_ids: vpc-00112233
# Gather information about any VPC with a tag key Name and value Example
- ec2_vpc_net_info:
filters:
"tag:Name": Example
'''
RETURN = '''
vpcs:
description: Returns an array of complex objects as described below.
returned: success
type: complex
contains:
id:
description: The ID of the VPC (for backwards compatibility).
returned: always
type: str
vpc_id:
description: The ID of the VPC.
returned: always
type: str
state:
description: The state of the VPC.
returned: always
type: str
tags:
description: A dict of tags associated with the VPC.
returned: always
type: dict
instance_tenancy:
description: The instance tenancy setting for the VPC.
returned: always
type: str
is_default:
description: True if this is the default VPC for account.
returned: always
type: bool
cidr_block:
description: The IPv4 CIDR block assigned to the VPC.
returned: always
type: str
classic_link_dns_supported:
description: True/False depending on attribute setting for classic link DNS support.
returned: always
type: bool
classic_link_enabled:
description: True/False depending on if classic link support is enabled.
returned: always
type: bool
enable_dns_hostnames:
description: True/False depending on attribute setting for DNS hostnames support.
returned: always
type: bool
enable_dns_support:
description: True/False depending on attribute setting for DNS support.
returned: always
type: bool
cidr_block_association_set:
description: An array of IPv4 cidr block association set information.
returned: always
type: complex
contains:
association_id:
description: The association ID
returned: always
type: str
cidr_block:
description: The IPv4 CIDR block that is associated with the VPC.
returned: always
type: str
cidr_block_state:
description: A hash/dict that contains a single item. The state of the cidr block association.
returned: always
type: dict
contains:
state:
description: The CIDR block association state.
returned: always
type: str
ipv6_cidr_block_association_set:
description: An array of IPv6 cidr block association set information.
returned: always
type: complex
contains:
association_id:
description: The association ID
returned: always
type: str
ipv6_cidr_block:
description: The IPv6 CIDR block that is associated with the VPC.
returned: always
type: str
ipv6_cidr_block_state:
description: A hash/dict that contains a single item. The state of the cidr block association.
returned: always
type: dict
contains:
state:
description: The CIDR block association state.
returned: always
type: str
'''
import traceback
from ansible.module_utils._text import to_native
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (
boto3_conn,
ec2_argument_spec,
get_aws_connection_info,
AWSRetry,
HAS_BOTO3,
boto3_tag_list_to_ansible_dict,
camel_dict_to_snake_dict,
ansible_dict_to_boto3_filter_list
)
try:
import botocore
except ImportError:
pass # caught by imported HAS_BOTO3
@AWSRetry.exponential_backoff()
def describe_vpc_attr_with_backoff(connection, vpc_id, vpc_attribute):
"""
Describe VPC Attributes with AWSRetry backoff throttling support.
connection : boto3 client connection object
vpc_id : The VPC ID to pull attribute value from
vpc_attribute : The VPC attribute to get the value from - valid options = enableDnsSupport or enableDnsHostnames
"""
return connection.describe_vpc_attribute(VpcId=vpc_id, Attribute=vpc_attribute)
def describe_vpcs(connection, module):
"""
Describe VPCs.
connection : boto3 client connection object
module : AnsibleModule object
"""
# collect parameters
filters = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
vpc_ids = module.params.get('vpc_ids')
# init empty list for return vars
vpc_info = list()
vpc_list = list()
# Get the basic VPC info
try:
response = connection.describe_vpcs(VpcIds=vpc_ids, Filters=filters)
except botocore.exceptions.ClientError as e:
module.fail_json(msg="Unable to describe VPCs {0}: {1}".format(vpc_ids, to_native(e)),
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
except botocore.exceptions.BotoCoreError as e:
module.fail_json(msg="Unable to describe VPCs {0}: {1}".format(vpc_ids, to_native(e)),
exception=traceback.format_exc())
# Loop through results and create a list of VPC IDs
for vpc in response['Vpcs']:
vpc_list.append(vpc['VpcId'])
# We can get these results in bulk but still needs two separate calls to the API
try:
cl_enabled = connection.describe_vpc_classic_link(VpcIds=vpc_list)
except botocore.exceptions.ClientError as e:
if e.response["Error"]["Message"] == "The functionality you requested is not available in this region.":
cl_enabled = {'Vpcs': [{'VpcId': vpc_id, 'ClassicLinkEnabled': False} for vpc_id in vpc_list]}
else:
module.fail_json(msg="Unable to describe if ClassicLink is enabled: {0}".format(to_native(e)),
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
except botocore.exceptions.BotoCoreError as e:
module.fail_json(msg="Unable to describe if ClassicLink is enabled: {0}".format(to_native(e)),
exception=traceback.format_exc())
try:
cl_dns_support = connection.describe_vpc_classic_link_dns_support(VpcIds=vpc_list)
except botocore.exceptions.ClientError as e:
if e.response["Error"]["Message"] == "The functionality you requested is not available in this region.":
cl_dns_support = {'Vpcs': [{'VpcId': vpc_id, 'ClassicLinkDnsSupported': False} for vpc_id in vpc_list]}
else:
module.fail_json(msg="Unable to describe if ClassicLinkDns is supported: {0}".format(to_native(e)),
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
except botocore.exceptions.BotoCoreError as e:
module.fail_json(msg="Unable to describe if ClassicLinkDns is supported: {0}".format(to_native(e)),
exception=traceback.format_exc())
# Loop through the results and add the other VPC attributes we gathered
for vpc in response['Vpcs']:
error_message = "Unable to describe VPC attribute {0}: {1}"
# We have to make two separate calls per VPC to get these attributes.
try:
dns_support = describe_vpc_attr_with_backoff(connection, vpc['VpcId'], 'enableDnsSupport')
except botocore.exceptions.ClientError as e:
module.fail_json(msg=error_message.format('enableDnsSupport', to_native(e)),
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
except botocore.exceptions.BotoCoreError as e:
module.fail_json(msg=error_message.format('enableDnsSupport', to_native(e)),
exception=traceback.format_exc())
try:
dns_hostnames = describe_vpc_attr_with_backoff(connection, vpc['VpcId'], 'enableDnsHostnames')
except botocore.exceptions.ClientError as e:
module.fail_json(msg=error_message.format('enableDnsHostnames', to_native(e)),
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
except botocore.exceptions.BotoCoreError as e:
module.fail_json(msg=error_message.format('enableDnsHostnames', to_native(e)),
exception=traceback.format_exc())
# loop through the ClassicLink Enabled results and add the value for the correct VPC
for item in cl_enabled['Vpcs']:
if vpc['VpcId'] == item['VpcId']:
vpc['ClassicLinkEnabled'] = item['ClassicLinkEnabled']
# loop through the ClassicLink DNS support results and add the value for the correct VPC
for item in cl_dns_support['Vpcs']:
if vpc['VpcId'] == item['VpcId']:
vpc['ClassicLinkDnsSupported'] = item['ClassicLinkDnsSupported']
# add the two DNS attributes
vpc['EnableDnsSupport'] = dns_support['EnableDnsSupport'].get('Value')
vpc['EnableDnsHostnames'] = dns_hostnames['EnableDnsHostnames'].get('Value')
# for backwards compatibility
vpc['id'] = vpc['VpcId']
vpc_info.append(camel_dict_to_snake_dict(vpc))
# convert tag list to ansible dict
vpc_info[-1]['tags'] = boto3_tag_list_to_ansible_dict(vpc.get('Tags', []))
module.exit_json(vpcs=vpc_info)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
vpc_ids=dict(type='list', default=[]),
filters=dict(type='dict', default={})
))
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
if module._name == 'ec2_vpc_net_facts':
module.deprecate("The 'ec2_vpc_net_facts' module has been renamed to 'ec2_vpc_net_info'", version='2.13')
if not HAS_BOTO3:
module.fail_json(msg='boto3 and botocore are required for this module')
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_params)
describe_vpcs(connection, module)
if __name__ == '__main__':
main()

@ -1,604 +0,0 @@
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: ec2_vpc_subnet
short_description: Manage subnets in AWS virtual private clouds
description:
- Manage subnets in AWS virtual private clouds.
version_added: "2.0"
author:
- Robert Estelle (@erydo)
- Brad Davidson (@brandond)
requirements: [ boto3 ]
options:
az:
description:
- "The availability zone for the subnet."
type: str
cidr:
description:
- "The CIDR block for the subnet. E.g. 192.0.2.0/24."
type: str
required: true
ipv6_cidr:
description:
- "The IPv6 CIDR block for the subnet. The VPC must have a /56 block assigned and this value must be a valid IPv6 /64 that falls in the VPC range."
- "Required if I(assign_instances_ipv6=true)"
version_added: "2.5"
type: str
tags:
description:
- "A dict of tags to apply to the subnet. Any tags currently applied to the subnet and not present here will be removed."
aliases: [ 'resource_tags' ]
type: dict
state:
description:
- "Create or remove the subnet."
default: present
choices: [ 'present', 'absent' ]
type: str
vpc_id:
description:
- "VPC ID of the VPC in which to create or delete the subnet."
required: true
type: str
map_public:
description:
- "Specify C(yes) to indicate that instances launched into the subnet should be assigned public IP address by default."
type: bool
default: 'no'
version_added: "2.4"
assign_instances_ipv6:
description:
- "Specify C(yes) to indicate that instances launched into the subnet should be automatically assigned an IPv6 address."
type: bool
default: false
version_added: "2.5"
wait:
description:
- "When I(wait=true) and I(state=present), module will wait for subnet to be in available state before continuing."
type: bool
default: true
version_added: "2.5"
wait_timeout:
description:
- "Number of seconds to wait for subnet to become available I(wait=True)."
default: 300
version_added: "2.5"
type: int
purge_tags:
description:
- Whether or not to remove tags that do not appear in the I(tags) list.
type: bool
default: true
version_added: "2.5"
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: Create subnet for database servers
ec2_vpc_subnet:
state: present
vpc_id: vpc-123456
cidr: 10.0.1.16/28
tags:
Name: Database Subnet
register: database_subnet
- name: Remove subnet for database servers
ec2_vpc_subnet:
state: absent
vpc_id: vpc-123456
cidr: 10.0.1.16/28
- name: Create subnet with IPv6 block assigned
ec2_vpc_subnet:
state: present
vpc_id: vpc-123456
cidr: 10.1.100.0/24
ipv6_cidr: 2001:db8:0:102::/64
- name: Remove IPv6 block assigned to subnet
ec2_vpc_subnet:
state: present
vpc_id: vpc-123456
cidr: 10.1.100.0/24
ipv6_cidr: ''
'''
RETURN = '''
subnet:
description: Dictionary of subnet values
returned: I(state=present)
type: complex
contains:
id:
description: Subnet resource id
returned: I(state=present)
type: str
sample: subnet-b883b2c4
cidr_block:
description: The IPv4 CIDR of the Subnet
returned: I(state=present)
type: str
sample: "10.0.0.0/16"
ipv6_cidr_block:
description: The IPv6 CIDR block actively associated with the Subnet
returned: I(state=present)
type: str
sample: "2001:db8:0:102::/64"
availability_zone:
description: Availability zone of the Subnet
returned: I(state=present)
type: str
sample: us-east-1a
state:
description: state of the Subnet
returned: I(state=present)
type: str
sample: available
tags:
description: tags attached to the Subnet, includes name
returned: I(state=present)
type: dict
sample: {"Name": "My Subnet", "env": "staging"}
map_public_ip_on_launch:
description: whether public IP is auto-assigned to new instances
returned: I(state=present)
type: bool
sample: false
assign_ipv6_address_on_creation:
description: whether IPv6 address is auto-assigned to new instances
returned: I(state=present)
type: bool
sample: false
vpc_id:
description: the id of the VPC where this Subnet exists
returned: I(state=present)
type: str
sample: vpc-67236184
available_ip_address_count:
description: number of available IPv4 addresses
returned: I(state=present)
type: str
sample: 251
default_for_az:
description: indicates whether this is the default Subnet for this Availability Zone
returned: I(state=present)
type: bool
sample: false
ipv6_association_id:
description: The IPv6 association ID for the currently associated CIDR
returned: I(state=present)
type: str
sample: subnet-cidr-assoc-b85c74d2
ipv6_cidr_block_association_set:
description: An array of IPv6 cidr block association set information.
returned: I(state=present)
type: complex
contains:
association_id:
description: The association ID
returned: always
type: str
ipv6_cidr_block:
description: The IPv6 CIDR block that is associated with the subnet.
returned: always
type: str
ipv6_cidr_block_state:
description: A hash/dict that contains a single item. The state of the cidr block association.
returned: always
type: dict
contains:
state:
description: The CIDR block association state.
returned: always
type: str
'''
import time
try:
import botocore
except ImportError:
pass # caught by AnsibleAWSModule
from ansible.module_utils._text import to_text
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.aws.waiters import get_waiter
from ansible.module_utils.ec2 import (ansible_dict_to_boto3_filter_list, ansible_dict_to_boto3_tag_list,
camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict, compare_aws_tags, AWSRetry)
def get_subnet_info(subnet):
if 'Subnets' in subnet:
return [get_subnet_info(s) for s in subnet['Subnets']]
elif 'Subnet' in subnet:
subnet = camel_dict_to_snake_dict(subnet['Subnet'])
else:
subnet = camel_dict_to_snake_dict(subnet)
if 'tags' in subnet:
subnet['tags'] = boto3_tag_list_to_ansible_dict(subnet['tags'])
else:
subnet['tags'] = dict()
if 'subnet_id' in subnet:
subnet['id'] = subnet['subnet_id']
del subnet['subnet_id']
subnet['ipv6_cidr_block'] = ''
subnet['ipv6_association_id'] = ''
ipv6set = subnet.get('ipv6_cidr_block_association_set')
if ipv6set:
for item in ipv6set:
if item.get('ipv6_cidr_block_state', {}).get('state') in ('associated', 'associating'):
subnet['ipv6_cidr_block'] = item['ipv6_cidr_block']
subnet['ipv6_association_id'] = item['association_id']
return subnet
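# Sketch mirroring the selection logic in get_subnet_info(): of all IPv6
# associations on a subnet, only one in the 'associated' or 'associating'
# state is surfaced as the subnet's current ipv6_cidr_block:
def _active_ipv6_block(subnet_info):
    for item in subnet_info.get('ipv6_cidr_block_association_set') or []:
        if item.get('ipv6_cidr_block_state', {}).get('state') in ('associated', 'associating'):
            return item['ipv6_cidr_block']
    return ''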
@AWSRetry.exponential_backoff()
def describe_subnets_with_backoff(client, **params):
return client.describe_subnets(**params)
def waiter_params(module, params, start_time):
if not module.botocore_at_least("1.7.0"):
remaining_wait_timeout = int(module.params['wait_timeout'] + start_time - time.time())
params['WaiterConfig'] = {'Delay': 5, 'MaxAttempts': remaining_wait_timeout // 5}
return params
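# The arithmetic in waiter_params() turns the user's remaining wall-clock
# budget into botocore WaiterConfig terms. Sketch with hypothetical numbers:
# a 300s wait_timeout with 40s already spent yields
# {'Delay': 5, 'MaxAttempts': 52}:
def _waiter_config(wait_timeout, start_time, now):
    remaining = int(wait_timeout + start_time - now)
    return {'Delay': 5, 'MaxAttempts': remaining // 5}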
def handle_waiter(conn, module, waiter_name, params, start_time):
try:
get_waiter(conn, waiter_name).wait(
**waiter_params(module, params, start_time)
)
except botocore.exceptions.WaiterError as e:
module.fail_json_aws(e, "Failed to wait for updates to complete")
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, "An exception happened while trying to wait for updates")
def create_subnet(conn, module, vpc_id, cidr, ipv6_cidr=None, az=None, start_time=None):
wait = module.params['wait']
wait_timeout = module.params['wait_timeout']
params = dict(VpcId=vpc_id,
CidrBlock=cidr)
if ipv6_cidr:
params['Ipv6CidrBlock'] = ipv6_cidr
if az:
params['AvailabilityZone'] = az
try:
subnet = get_subnet_info(conn.create_subnet(**params))
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't create subnet")
# Sometimes AWS takes its time to create a subnet and so using
# the new subnet's id to do things like create tags results in
# an exception.
if wait and subnet.get('state') != 'available':
handle_waiter(conn, module, 'subnet_exists', {'SubnetIds': [subnet['id']]}, start_time)
try:
conn.get_waiter('subnet_available').wait(
**waiter_params(module, {'SubnetIds': [subnet['id']]}, start_time)
)
subnet['state'] = 'available'
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, "Create subnet action timed out waiting for subnet to become available")
return subnet
def ensure_tags(conn, module, subnet, tags, purge_tags, start_time):
changed = False
filters = ansible_dict_to_boto3_filter_list({'resource-id': subnet['id'], 'resource-type': 'subnet'})
try:
cur_tags = conn.describe_tags(Filters=filters)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't describe tags")
to_update, to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(cur_tags.get('Tags')), tags, purge_tags)
if to_update:
try:
if not module.check_mode:
AWSRetry.exponential_backoff(
catch_extra_error_codes=['InvalidSubnetID.NotFound']
)(conn.create_tags)(
Resources=[subnet['id']],
Tags=ansible_dict_to_boto3_tag_list(to_update)
)
changed = True
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't create tags")
if to_delete:
try:
if not module.check_mode:
tags_list = []
for key in to_delete:
tags_list.append({'Key': key})
AWSRetry.exponential_backoff(
catch_extra_error_codes=['InvalidSubnetID.NotFound']
)(conn.delete_tags)(Resources=[subnet['id']], Tags=tags_list)
changed = True
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't delete tags")
if module.params['wait'] and not module.check_mode:
# Wait for tags to be updated
filters = [{'Name': 'tag:{0}'.format(k), 'Values': [v]} for k, v in tags.items()]
handle_waiter(conn, module, 'subnet_exists',
{'SubnetIds': [subnet['id']], 'Filters': filters}, start_time)
return changed
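# Note on the retry idiom above: AWSRetry.exponential_backoff(...) returns a decorator, so
# AWSRetry.exponential_backoff(...)(conn.create_tags)(Resources=..., Tags=...) wraps the
# boto3 client method with retries (including InvalidSubnetID.NotFound, which can occur
# while a newly created subnet propagates) for that single call only.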
def ensure_map_public(conn, module, subnet, map_public, check_mode, start_time):
if check_mode:
return
try:
conn.modify_subnet_attribute(SubnetId=subnet['id'], MapPublicIpOnLaunch={'Value': map_public})
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't modify subnet attribute")
def ensure_assign_ipv6_on_create(conn, module, subnet, assign_instances_ipv6, check_mode, start_time):
if check_mode:
return
try:
conn.modify_subnet_attribute(SubnetId=subnet['id'], AssignIpv6AddressOnCreation={'Value': assign_instances_ipv6})
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't modify subnet attribute")
def disassociate_ipv6_cidr(conn, module, subnet, start_time):
if subnet.get('assign_ipv6_address_on_creation'):
ensure_assign_ipv6_on_create(conn, module, subnet, False, False, start_time)
try:
conn.disassociate_subnet_cidr_block(AssociationId=subnet['ipv6_association_id'])
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't disassociate ipv6 cidr block id {0} from subnet {1}"
.format(subnet['ipv6_association_id'], subnet['id']))
# Wait for cidr block to be disassociated
if module.params['wait']:
filters = ansible_dict_to_boto3_filter_list(
{'ipv6-cidr-block-association.state': ['disassociated'],
'vpc-id': subnet['vpc_id']}
)
handle_waiter(conn, module, 'subnet_exists',
{'SubnetIds': [subnet['id']], 'Filters': filters}, start_time)
def ensure_ipv6_cidr_block(conn, module, subnet, ipv6_cidr, check_mode, start_time):
wait = module.params['wait']
changed = False
if subnet['ipv6_association_id'] and not ipv6_cidr:
if not check_mode:
disassociate_ipv6_cidr(conn, module, subnet, start_time)
changed = True
if ipv6_cidr:
filters = ansible_dict_to_boto3_filter_list({'ipv6-cidr-block-association.ipv6-cidr-block': ipv6_cidr,
'vpc-id': subnet['vpc_id']})
try:
check_subnets = get_subnet_info(describe_subnets_with_backoff(conn, Filters=filters))
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't get subnet info")
if check_subnets and check_subnets[0]['ipv6_cidr_block']:
module.fail_json(msg="The IPv6 CIDR '{0}' conflicts with another subnet".format(ipv6_cidr))
if subnet['ipv6_association_id']:
if not check_mode:
disassociate_ipv6_cidr(conn, module, subnet, start_time)
changed = True
        associate_resp = {}
        try:
if not check_mode:
associate_resp = conn.associate_subnet_cidr_block(SubnetId=subnet['id'], Ipv6CidrBlock=ipv6_cidr)
changed = True
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't associate ipv6 cidr {0} to {1}".format(ipv6_cidr, subnet['id']))
else:
if not check_mode and wait:
filters = ansible_dict_to_boto3_filter_list(
{'ipv6-cidr-block-association.state': ['associated'],
'vpc-id': subnet['vpc_id']}
)
handle_waiter(conn, module, 'subnet_exists',
{'SubnetIds': [subnet['id']], 'Filters': filters}, start_time)
if associate_resp.get('Ipv6CidrBlockAssociation', {}).get('AssociationId'):
subnet['ipv6_association_id'] = associate_resp['Ipv6CidrBlockAssociation']['AssociationId']
subnet['ipv6_cidr_block'] = associate_resp['Ipv6CidrBlockAssociation']['Ipv6CidrBlock']
if subnet['ipv6_cidr_block_association_set']:
subnet['ipv6_cidr_block_association_set'][0] = camel_dict_to_snake_dict(associate_resp['Ipv6CidrBlockAssociation'])
else:
subnet['ipv6_cidr_block_association_set'].append(camel_dict_to_snake_dict(associate_resp['Ipv6CidrBlockAssociation']))
return changed
def get_matching_subnet(conn, module, vpc_id, cidr):
filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id, 'cidr-block': cidr})
try:
subnets = get_subnet_info(describe_subnets_with_backoff(conn, Filters=filters))
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't get matching subnet")
if subnets:
return subnets[0]
return None
def ensure_subnet_present(conn, module):
subnet = get_matching_subnet(conn, module, module.params['vpc_id'], module.params['cidr'])
changed = False
    # Record the start time so that waits across multiple operations do not exceed the specified wait_timeout in total
start_time = time.time()
if subnet is None:
if not module.check_mode:
subnet = create_subnet(conn, module, module.params['vpc_id'], module.params['cidr'],
ipv6_cidr=module.params['ipv6_cidr'], az=module.params['az'], start_time=start_time)
changed = True
# Subnet will be None when check_mode is true
if subnet is None:
return {
'changed': changed,
'subnet': {}
}
if module.params['wait']:
handle_waiter(conn, module, 'subnet_exists', {'SubnetIds': [subnet['id']]}, start_time)
if module.params['ipv6_cidr'] != subnet.get('ipv6_cidr_block'):
if ensure_ipv6_cidr_block(conn, module, subnet, module.params['ipv6_cidr'], module.check_mode, start_time):
changed = True
if module.params['map_public'] != subnet['map_public_ip_on_launch']:
ensure_map_public(conn, module, subnet, module.params['map_public'], module.check_mode, start_time)
changed = True
if module.params['assign_instances_ipv6'] != subnet.get('assign_ipv6_address_on_creation'):
ensure_assign_ipv6_on_create(conn, module, subnet, module.params['assign_instances_ipv6'], module.check_mode, start_time)
changed = True
if module.params['tags'] != subnet['tags']:
stringified_tags_dict = dict((to_text(k), to_text(v)) for k, v in module.params['tags'].items())
if ensure_tags(conn, module, subnet, stringified_tags_dict, module.params['purge_tags'], start_time):
changed = True
subnet = get_matching_subnet(conn, module, module.params['vpc_id'], module.params['cidr'])
if not module.check_mode and module.params['wait']:
# GET calls are not monotonic for map_public_ip_on_launch and assign_ipv6_address_on_creation
# so we only wait for those if necessary just before returning the subnet
subnet = ensure_final_subnet(conn, module, subnet, start_time)
return {
'changed': changed,
'subnet': subnet
}
def ensure_final_subnet(conn, module, subnet, start_time):
for rewait in range(0, 30):
map_public_correct = False
assign_ipv6_correct = False
if module.params['map_public'] == subnet['map_public_ip_on_launch']:
map_public_correct = True
else:
if module.params['map_public']:
handle_waiter(conn, module, 'subnet_has_map_public', {'SubnetIds': [subnet['id']]}, start_time)
else:
handle_waiter(conn, module, 'subnet_no_map_public', {'SubnetIds': [subnet['id']]}, start_time)
if module.params['assign_instances_ipv6'] == subnet.get('assign_ipv6_address_on_creation'):
assign_ipv6_correct = True
else:
if module.params['assign_instances_ipv6']:
handle_waiter(conn, module, 'subnet_has_assign_ipv6', {'SubnetIds': [subnet['id']]}, start_time)
else:
handle_waiter(conn, module, 'subnet_no_assign_ipv6', {'SubnetIds': [subnet['id']]}, start_time)
if map_public_correct and assign_ipv6_correct:
break
time.sleep(5)
subnet = get_matching_subnet(conn, module, module.params['vpc_id'], module.params['cidr'])
return subnet
def ensure_subnet_absent(conn, module):
subnet = get_matching_subnet(conn, module, module.params['vpc_id'], module.params['cidr'])
if subnet is None:
return {'changed': False}
try:
if not module.check_mode:
conn.delete_subnet(SubnetId=subnet['id'])
if module.params['wait']:
handle_waiter(conn, module, 'subnet_deleted', {'SubnetIds': [subnet['id']]}, time.time())
return {'changed': True}
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg="Couldn't delete subnet")
def main():
argument_spec = dict(
az=dict(default=None, required=False),
cidr=dict(required=True),
ipv6_cidr=dict(default='', required=False),
state=dict(default='present', choices=['present', 'absent']),
tags=dict(default={}, required=False, type='dict', aliases=['resource_tags']),
vpc_id=dict(required=True),
map_public=dict(default=False, required=False, type='bool'),
assign_instances_ipv6=dict(default=False, required=False, type='bool'),
wait=dict(type='bool', default=True),
wait_timeout=dict(type='int', default=300, required=False),
purge_tags=dict(default=True, type='bool')
)
required_if = [('assign_instances_ipv6', True, ['ipv6_cidr'])]
module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True, required_if=required_if)
if module.params.get('assign_instances_ipv6') and not module.params.get('ipv6_cidr'):
module.fail_json(msg="assign_instances_ipv6 is True but ipv6_cidr is None or an empty string")
if not module.botocore_at_least("1.7.0"):
module.warn("botocore >= 1.7.0 is required to use wait_timeout for custom wait times")
connection = module.client('ec2')
state = module.params.get('state')
try:
if state == 'present':
result = ensure_subnet_present(connection, module)
elif state == 'absent':
result = ensure_subnet_absent(connection, module)
except botocore.exceptions.ClientError as e:
module.fail_json_aws(e)
module.exit_json(**result)
if __name__ == '__main__':
main()

@ -1,250 +0,0 @@
#!/usr/bin/python
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: ec2_vpc_subnet_info
short_description: Gather information about ec2 VPC subnets in AWS
description:
- Gather information about ec2 VPC subnets in AWS
- This module was called C(ec2_vpc_subnet_facts) before Ansible 2.9. The usage did not change.
version_added: "2.1"
author: "Rob White (@wimnat)"
requirements:
- boto3
- botocore
options:
subnet_ids:
description:
- A list of subnet IDs to gather information for.
version_added: "2.5"
aliases: ['subnet_id']
type: list
elements: str
filters:
description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value.
See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSubnets.html) for possible filters.
type: dict
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Gather information about all VPC subnets
- ec2_vpc_subnet_info:
# Gather information about a particular VPC subnet using ID
- ec2_vpc_subnet_info:
subnet_ids: subnet-00112233
# Gather information about any VPC subnet with a tag key Name and value Example
- ec2_vpc_subnet_info:
filters:
"tag:Name": Example
# Gather information about any VPC subnet within VPC with ID vpc-abcdef00
- ec2_vpc_subnet_info:
filters:
vpc-id: vpc-abcdef00
# Gather information about a set of VPC subnets, publicA, publicB and publicC within a
# VPC with ID vpc-abcdef00 and then use the jinja map function to return the
# subnet_ids as a list.
- ec2_vpc_subnet_info:
filters:
vpc-id: vpc-abcdef00
"tag:Name": "{{ item }}"
loop:
- publicA
- publicB
- publicC
register: subnet_info
- set_fact:
subnet_ids: "{{ subnet_info.subnets|map(attribute='id')|list }}"
'''
RETURN = '''
subnets:
description: Returns an array of complex objects as described below.
returned: success
type: complex
contains:
subnet_id:
description: The ID of the Subnet.
returned: always
type: str
id:
description: The ID of the Subnet (for backwards compatibility).
returned: always
type: str
vpc_id:
      description: The ID of the VPC.
returned: always
type: str
state:
description: The state of the subnet.
returned: always
type: str
tags:
description: A dict of tags associated with the Subnet.
returned: always
type: dict
map_public_ip_on_launch:
description: True/False depending on attribute setting for public IP mapping.
returned: always
type: bool
default_for_az:
description: True if this is the default subnet for AZ.
returned: always
type: bool
cidr_block:
description: The IPv4 CIDR block assigned to the subnet.
returned: always
type: str
available_ip_address_count:
description: Count of available IPs in subnet.
returned: always
type: str
availability_zone:
description: The availability zone where the subnet exists.
returned: always
type: str
assign_ipv6_address_on_creation:
description: True/False depending on attribute setting for IPv6 address assignment.
returned: always
type: bool
ipv6_cidr_block_association_set:
description: An array of IPv6 cidr block association set information.
returned: always
type: complex
contains:
association_id:
description: The association ID
returned: always
type: str
ipv6_cidr_block:
description: The IPv6 CIDR block that is associated with the subnet.
returned: always
type: str
ipv6_cidr_block_state:
description: A hash/dict that contains a single item. The state of the cidr block association.
returned: always
type: dict
contains:
state:
description: The CIDR block association state.
returned: always
type: str
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (
boto3_conn,
ec2_argument_spec,
get_aws_connection_info,
AWSRetry,
HAS_BOTO3,
boto3_tag_list_to_ansible_dict,
camel_dict_to_snake_dict,
ansible_dict_to_boto3_filter_list
)
from ansible.module_utils._text import to_native
try:
import botocore
except ImportError:
pass # caught by imported HAS_BOTO3
@AWSRetry.exponential_backoff()
def describe_subnets_with_backoff(connection, subnet_ids, filters):
"""
Describe Subnets with AWSRetry backoff throttling support.
connection : boto3 client connection object
subnet_ids : list of subnet ids for which to gather information
filters : additional filters to apply to request
"""
return connection.describe_subnets(SubnetIds=subnet_ids, Filters=filters)
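# Usage sketch (hypothetical IDs): filters are built with ansible_dict_to_boto3_filter_list, e.g.
#   filters = ansible_dict_to_boto3_filter_list({'vpc-id': 'vpc-abcdef00'})
#   response = describe_subnets_with_backoff(connection, ['subnet-00112233'], filters)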
def describe_subnets(connection, module):
"""
Describe Subnets.
module : AnsibleModule object
connection : boto3 client connection object
"""
# collect parameters
filters = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
subnet_ids = module.params.get('subnet_ids')
if subnet_ids is None:
# Set subnet_ids to empty list if it is None
subnet_ids = []
# init empty list for return vars
subnet_info = list()
    # Get the basic subnet info
try:
response = describe_subnets_with_backoff(connection, subnet_ids, filters)
except botocore.exceptions.ClientError as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
for subnet in response['Subnets']:
# for backwards compatibility
subnet['id'] = subnet['SubnetId']
subnet_info.append(camel_dict_to_snake_dict(subnet))
# convert tag list to ansible dict
subnet_info[-1]['tags'] = boto3_tag_list_to_ansible_dict(subnet.get('Tags', []))
module.exit_json(subnets=subnet_info)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
subnet_ids=dict(type='list', default=[], aliases=['subnet_id']),
filters=dict(type='dict', default={})
))
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
if module._name == 'ec2_vpc_subnet_facts':
module.deprecate("The 'ec2_vpc_subnet_facts' module has been renamed to 'ec2_vpc_subnet_info'", version='2.13')
if not HAS_BOTO3:
module.fail_json(msg='boto3 is required for this module')
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
if region:
try:
connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_params)
except (botocore.exceptions.NoCredentialsError, botocore.exceptions.ProfileNotFound) as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
else:
module.fail_json(msg="Region must be specified")
describe_subnets(connection, module)
if __name__ == '__main__':
main()

@ -1,767 +0,0 @@
#!/usr/bin/python
#
# This is a free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This Ansible library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: s3_bucket
short_description: Manage S3 buckets in AWS, DigitalOcean, Ceph, Walrus, FakeS3 and StorageGRID
description:
- Manage S3 buckets in AWS, DigitalOcean, Ceph, Walrus, FakeS3 and StorageGRID
version_added: "2.0"
requirements: [ boto3 ]
author: "Rob White (@wimnat)"
options:
force:
description:
- When trying to delete a bucket, delete all keys (including versions and delete markers)
in the bucket first (an s3 bucket must be empty for a successful deletion)
type: bool
default: 'no'
name:
description:
- Name of the s3 bucket
required: true
type: str
policy:
description:
- The JSON policy as a string.
type: json
s3_url:
description:
- S3 URL endpoint for usage with DigitalOcean, Ceph, Eucalyptus and fakes3 etc.
- Assumes AWS if not specified.
      - For Walrus, use FQDN of the endpoint without scheme or path.
aliases: [ S3_URL ]
type: str
ceph:
description:
- Enable API compatibility with Ceph. It takes into account the S3 API subset working
with Ceph in order to provide the same module behaviour where possible.
type: bool
version_added: "2.2"
requester_pays:
description:
- With Requester Pays buckets, the requester instead of the bucket owner pays the cost
of the request and the data download from the bucket.
type: bool
default: False
state:
description:
- Create or remove the s3 bucket
required: false
default: present
choices: [ 'present', 'absent' ]
type: str
tags:
description:
- tags dict to apply to bucket
type: dict
purge_tags:
description:
- whether to remove tags that aren't present in the C(tags) parameter
type: bool
default: True
version_added: "2.9"
versioning:
description:
- Whether versioning is enabled or disabled (note that once versioning is enabled, it can only be suspended)
type: bool
encryption:
description:
- Describes the default server-side encryption to apply to new objects in the bucket.
In order to remove the server-side encryption, the encryption needs to be set to 'none' explicitly.
choices: [ 'none', 'AES256', 'aws:kms' ]
version_added: "2.9"
type: str
encryption_key_id:
description: KMS master key ID to use for the default encryption. This parameter is allowed if encryption is aws:kms. If
not specified then it will default to the AWS provided KMS key.
version_added: "2.9"
type: str
extends_documentation_fragment:
- aws
- ec2
notes:
- If C(requestPayment), C(policy), C(tagging) or C(versioning)
    operations/API aren't implemented by the endpoint, the module doesn't fail
    as long as the related parameters keep their defaults, that is,
    I(requester_pays) is C(False) and I(policy), I(tags), and I(versioning) are C(None).
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Create a simple s3 bucket
- s3_bucket:
name: mys3bucket
state: present
# Create a simple s3 bucket on Ceph Rados Gateway
- s3_bucket:
name: mys3bucket
s3_url: http://your-ceph-rados-gateway-server.xxx
ceph: true
# Remove an s3 bucket and any keys it contains
- s3_bucket:
name: mys3bucket
state: absent
force: yes
# Create a bucket, add a policy from a file, enable requester pays, enable versioning and tag
- s3_bucket:
name: mys3bucket
policy: "{{ lookup('file','policy.json') }}"
requester_pays: yes
versioning: yes
tags:
example: tag1
another: tag2
# Create a simple DigitalOcean Spaces bucket using their provided regional endpoint
- s3_bucket:
name: mydobucket
s3_url: 'https://nyc3.digitaloceanspaces.com'
# Create a bucket with AES256 encryption
- s3_bucket:
name: mys3bucket
state: present
encryption: "AES256"
# Create a bucket with aws:kms encryption, KMS key
- s3_bucket:
name: mys3bucket
state: present
encryption: "aws:kms"
encryption_key_id: "arn:aws:kms:us-east-1:1234/5678example"
# Create a bucket with aws:kms encryption, default key
- s3_bucket:
name: mys3bucket
state: present
encryption: "aws:kms"
'''
import json
import os
import time
from ansible.module_utils.six.moves.urllib.parse import urlparse
from ansible.module_utils.six import string_types
from ansible.module_utils.basic import to_text
from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code
from ansible.module_utils.ec2 import compare_policies, boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list
from ansible.module_utils.ec2 import get_aws_connection_info, boto3_conn, AWSRetry
try:
from botocore.exceptions import BotoCoreError, ClientError, EndpointConnectionError, WaiterError
except ImportError:
pass # caught by AnsibleAWSModule
def create_or_update_bucket(s3_client, module, location):
policy = module.params.get("policy")
name = module.params.get("name")
requester_pays = module.params.get("requester_pays")
tags = module.params.get("tags")
purge_tags = module.params.get("purge_tags")
versioning = module.params.get("versioning")
encryption = module.params.get("encryption")
encryption_key_id = module.params.get("encryption_key_id")
changed = False
result = {}
try:
bucket_is_present = bucket_exists(s3_client, name)
except EndpointConnectionError as e:
module.fail_json_aws(e, msg="Invalid endpoint provided: %s" % to_text(e))
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Failed to check bucket presence")
if not bucket_is_present:
try:
bucket_changed = create_bucket(s3_client, name, location)
s3_client.get_waiter('bucket_exists').wait(Bucket=name)
changed = changed or bucket_changed
except WaiterError as e:
module.fail_json_aws(e, msg='An error occurred waiting for the bucket to become available')
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Failed while creating bucket")
# Versioning
try:
versioning_status = get_bucket_versioning(s3_client, name)
except BotoCoreError as exp:
module.fail_json_aws(exp, msg="Failed to get bucket versioning")
except ClientError as exp:
if exp.response['Error']['Code'] != 'NotImplemented' or versioning is not None:
module.fail_json_aws(exp, msg="Failed to get bucket versioning")
else:
if versioning is not None:
required_versioning = None
if versioning and versioning_status.get('Status') != "Enabled":
required_versioning = 'Enabled'
elif not versioning and versioning_status.get('Status') == "Enabled":
required_versioning = 'Suspended'
if required_versioning:
try:
put_bucket_versioning(s3_client, name, required_versioning)
changed = True
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Failed to update bucket versioning")
versioning_status = wait_versioning_is_applied(module, s3_client, name, required_versioning)
# This output format is there to ensure compatibility with previous versions of the module
result['versioning'] = {
'Versioning': versioning_status.get('Status', 'Disabled'),
'MfaDelete': versioning_status.get('MFADelete', 'Disabled'),
}
# Requester pays
try:
requester_pays_status = get_bucket_request_payment(s3_client, name)
except BotoCoreError as exp:
module.fail_json_aws(exp, msg="Failed to get bucket request payment")
except ClientError as exp:
if exp.response['Error']['Code'] not in ('NotImplemented', 'XNotImplemented') or requester_pays:
module.fail_json_aws(exp, msg="Failed to get bucket request payment")
else:
if requester_pays:
payer = 'Requester' if requester_pays else 'BucketOwner'
if requester_pays_status != payer:
put_bucket_request_payment(s3_client, name, payer)
requester_pays_status = wait_payer_is_applied(module, s3_client, name, payer, should_fail=False)
if requester_pays_status is None:
                    # We have seen the put request frequently not take effect immediately,
                    # so we retry one more time
put_bucket_request_payment(s3_client, name, payer)
requester_pays_status = wait_payer_is_applied(module, s3_client, name, payer, should_fail=True)
changed = True
result['requester_pays'] = requester_pays
# Policy
try:
current_policy = get_bucket_policy(s3_client, name)
except BotoCoreError as exp:
module.fail_json_aws(exp, msg="Failed to get bucket policy")
except ClientError as exp:
if exp.response['Error']['Code'] != 'NotImplemented' or policy is not None:
module.fail_json_aws(exp, msg="Failed to get bucket policy")
else:
if policy is not None:
if isinstance(policy, string_types):
policy = json.loads(policy)
if not policy and current_policy:
try:
delete_bucket_policy(s3_client, name)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Failed to delete bucket policy")
current_policy = wait_policy_is_applied(module, s3_client, name, policy)
changed = True
elif compare_policies(current_policy, policy):
try:
put_bucket_policy(s3_client, name, policy)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Failed to update bucket policy")
current_policy = wait_policy_is_applied(module, s3_client, name, policy, should_fail=False)
if current_policy is None:
                    # As with request payment, the put request frequently doesn't take
                    # effect immediately, so we retry one more time
put_bucket_policy(s3_client, name, policy)
current_policy = wait_policy_is_applied(module, s3_client, name, policy, should_fail=True)
changed = True
result['policy'] = current_policy
# Tags
try:
current_tags_dict = get_current_bucket_tags_dict(s3_client, name)
except BotoCoreError as exp:
module.fail_json_aws(exp, msg="Failed to get bucket tags")
except ClientError as exp:
if exp.response['Error']['Code'] not in ('NotImplemented', 'XNotImplemented') or tags is not None:
module.fail_json_aws(exp, msg="Failed to get bucket tags")
else:
if tags is not None:
# Tags are always returned as text
tags = dict((to_text(k), to_text(v)) for k, v in tags.items())
if not purge_tags:
# Ensure existing tags that aren't updated by desired tags remain
current_copy = current_tags_dict.copy()
current_copy.update(tags)
tags = current_copy
if current_tags_dict != tags:
if tags:
try:
put_bucket_tagging(s3_client, name, tags)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Failed to update bucket tags")
else:
if purge_tags:
try:
delete_bucket_tagging(s3_client, name)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Failed to delete bucket tags")
current_tags_dict = wait_tags_are_applied(module, s3_client, name, tags)
changed = True
result['tags'] = current_tags_dict
# Encryption
try:
current_encryption = get_bucket_encryption(s3_client, name)
except (ClientError, BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed to get bucket encryption")
if encryption is not None:
current_encryption_algorithm = current_encryption.get('SSEAlgorithm') if current_encryption else None
current_encryption_key = current_encryption.get('KMSMasterKeyID') if current_encryption else None
if encryption == 'none' and current_encryption_algorithm is not None:
try:
delete_bucket_encryption(s3_client, name)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Failed to delete bucket encryption")
current_encryption = wait_encryption_is_applied(module, s3_client, name, None)
changed = True
        elif encryption != 'none' and ((encryption != current_encryption_algorithm) or (encryption == 'aws:kms' and current_encryption_key != encryption_key_id)):
expected_encryption = {'SSEAlgorithm': encryption}
if encryption == 'aws:kms' and encryption_key_id is not None:
expected_encryption.update({'KMSMasterKeyID': encryption_key_id})
current_encryption = put_bucket_encryption_with_retry(module, s3_client, name, expected_encryption)
changed = True
result['encryption'] = current_encryption
module.exit_json(changed=changed, name=name, **result)
def bucket_exists(s3_client, bucket_name):
# head_bucket appeared to be really inconsistent, so we use list_buckets instead,
# and loop over all the buckets, even if we know it's less performant :(
    all_buckets = s3_client.list_buckets()['Buckets']
return any(bucket['Name'] == bucket_name for bucket in all_buckets)
@AWSRetry.exponential_backoff(max_delay=120)
def create_bucket(s3_client, bucket_name, location):
try:
configuration = {}
if location not in ('us-east-1', None):
configuration['LocationConstraint'] = location
if len(configuration) > 0:
s3_client.create_bucket(Bucket=bucket_name, CreateBucketConfiguration=configuration)
else:
s3_client.create_bucket(Bucket=bucket_name)
return True
except ClientError as e:
error_code = e.response['Error']['Code']
if error_code == 'BucketAlreadyOwnedByYou':
            # We should never get here since we check the bucket presence before calling the create_or_update_bucket
            # method. However, the AWS API sometimes fails to report bucket presence, so we catch this exception
return False
else:
raise e
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
def put_bucket_tagging(s3_client, bucket_name, tags):
s3_client.put_bucket_tagging(Bucket=bucket_name, Tagging={'TagSet': ansible_dict_to_boto3_tag_list(tags)})
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
def put_bucket_policy(s3_client, bucket_name, policy):
s3_client.put_bucket_policy(Bucket=bucket_name, Policy=json.dumps(policy))
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
def delete_bucket_policy(s3_client, bucket_name):
s3_client.delete_bucket_policy(Bucket=bucket_name)
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
def get_bucket_policy(s3_client, bucket_name):
try:
current_policy = json.loads(s3_client.get_bucket_policy(Bucket=bucket_name).get('Policy'))
except ClientError as e:
if e.response['Error']['Code'] == 'NoSuchBucketPolicy':
current_policy = None
else:
raise e
return current_policy
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
def put_bucket_request_payment(s3_client, bucket_name, payer):
s3_client.put_bucket_request_payment(Bucket=bucket_name, RequestPaymentConfiguration={'Payer': payer})
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
def get_bucket_request_payment(s3_client, bucket_name):
return s3_client.get_bucket_request_payment(Bucket=bucket_name).get('Payer')
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
def get_bucket_versioning(s3_client, bucket_name):
return s3_client.get_bucket_versioning(Bucket=bucket_name)
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
def put_bucket_versioning(s3_client, bucket_name, required_versioning):
s3_client.put_bucket_versioning(Bucket=bucket_name, VersioningConfiguration={'Status': required_versioning})
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
def get_bucket_encryption(s3_client, bucket_name):
if not hasattr(s3_client, "get_bucket_encryption"):
return None
try:
result = s3_client.get_bucket_encryption(Bucket=bucket_name)
return result.get('ServerSideEncryptionConfiguration', {}).get('Rules', [])[0].get('ApplyServerSideEncryptionByDefault')
except ClientError as e:
if e.response['Error']['Code'] == 'ServerSideEncryptionConfigurationNotFoundError':
return None
else:
raise e
except (IndexError, KeyError):
return None
def put_bucket_encryption_with_retry(module, s3_client, name, expected_encryption):
max_retries = 3
for retries in range(1, max_retries + 1):
try:
put_bucket_encryption(s3_client, name, expected_encryption)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Failed to set bucket encryption")
current_encryption = wait_encryption_is_applied(module, s3_client, name, expected_encryption,
should_fail=(retries == max_retries), retries=5)
if current_encryption == expected_encryption:
return current_encryption
    # We shouldn't get here: the only way to reach this point is if
    # current_encryption != expected_encryption on the final retry, and in that case
    # wait_encryption_is_applied is called with should_fail=True and fails the module first.
module.fail_json(msg='Failed to apply bucket encryption',
current=current_encryption, expected=expected_encryption, retries=retries)
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
def put_bucket_encryption(s3_client, bucket_name, encryption):
server_side_encryption_configuration = {'Rules': [{'ApplyServerSideEncryptionByDefault': encryption}]}
s3_client.put_bucket_encryption(Bucket=bucket_name, ServerSideEncryptionConfiguration=server_side_encryption_configuration)
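# Example shape (hypothetical key ARN): the encryption argument above is the
# ApplyServerSideEncryptionByDefault dict, e.g. {'SSEAlgorithm': 'AES256'} or
# {'SSEAlgorithm': 'aws:kms', 'KMSMasterKeyID': 'arn:aws:kms:us-east-1:1234/5678example'}.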
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
def delete_bucket_tagging(s3_client, bucket_name):
s3_client.delete_bucket_tagging(Bucket=bucket_name)
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted'])
def delete_bucket_encryption(s3_client, bucket_name):
s3_client.delete_bucket_encryption(Bucket=bucket_name)
@AWSRetry.exponential_backoff(max_delay=240, catch_extra_error_codes=['OperationAborted'])
def delete_bucket(s3_client, bucket_name):
try:
s3_client.delete_bucket(Bucket=bucket_name)
except ClientError as e:
if e.response['Error']['Code'] == 'NoSuchBucket':
            # This means the bucket was most likely in a deleting state when we
            # checked its existence; we just ignore the error
pass
else:
raise e
def wait_policy_is_applied(module, s3_client, bucket_name, expected_policy, should_fail=True):
for dummy in range(0, 12):
try:
current_policy = get_bucket_policy(s3_client, bucket_name)
except (ClientError, BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed to get bucket policy")
if compare_policies(current_policy, expected_policy):
time.sleep(5)
else:
return current_policy
if should_fail:
module.fail_json(msg="Bucket policy failed to apply in the expected time",
requested_policy=expected_policy, live_policy=current_policy)
else:
return None
def wait_payer_is_applied(module, s3_client, bucket_name, expected_payer, should_fail=True):
for dummy in range(0, 12):
try:
requester_pays_status = get_bucket_request_payment(s3_client, bucket_name)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Failed to get bucket request payment")
if requester_pays_status != expected_payer:
time.sleep(5)
else:
return requester_pays_status
if should_fail:
module.fail_json(msg="Bucket request payment failed to apply in the expected time",
requested_status=expected_payer, live_status=requester_pays_status)
else:
return None
def wait_encryption_is_applied(module, s3_client, bucket_name, expected_encryption, should_fail=True, retries=12):
for dummy in range(0, retries):
try:
encryption = get_bucket_encryption(s3_client, bucket_name)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Failed to get updated encryption for bucket")
if encryption != expected_encryption:
time.sleep(5)
else:
return encryption
if should_fail:
module.fail_json(msg="Bucket encryption failed to apply in the expected time",
requested_encryption=expected_encryption, live_encryption=encryption)
return encryption
def wait_versioning_is_applied(module, s3_client, bucket_name, required_versioning):
for dummy in range(0, 24):
try:
versioning_status = get_bucket_versioning(s3_client, bucket_name)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Failed to get updated versioning for bucket")
if versioning_status.get('Status') != required_versioning:
time.sleep(8)
else:
return versioning_status
module.fail_json(msg="Bucket versioning failed to apply in the expected time",
requested_versioning=required_versioning, live_versioning=versioning_status)
def wait_tags_are_applied(module, s3_client, bucket_name, expected_tags_dict):
for dummy in range(0, 12):
try:
current_tags_dict = get_current_bucket_tags_dict(s3_client, bucket_name)
except (ClientError, BotoCoreError) as e:
module.fail_json_aws(e, msg="Failed to get bucket policy")
if current_tags_dict != expected_tags_dict:
time.sleep(5)
else:
return current_tags_dict
module.fail_json(msg="Bucket tags failed to apply in the expected time",
requested_tags=expected_tags_dict, live_tags=current_tags_dict)
def get_current_bucket_tags_dict(s3_client, bucket_name):
try:
current_tags = s3_client.get_bucket_tagging(Bucket=bucket_name).get('TagSet')
except ClientError as e:
if e.response['Error']['Code'] == 'NoSuchTagSet':
return {}
raise e
return boto3_tag_list_to_ansible_dict(current_tags)
def paginated_list(s3_client, **pagination_params):
pg = s3_client.get_paginator('list_objects_v2')
for page in pg.paginate(**pagination_params):
yield [data['Key'] for data in page.get('Contents', [])]
def paginated_versions_list(s3_client, **pagination_params):
try:
pg = s3_client.get_paginator('list_object_versions')
for page in pg.paginate(**pagination_params):
# We have to merge the Versions and DeleteMarker lists here, as DeleteMarkers can still prevent a bucket deletion
yield [(data['Key'], data['VersionId']) for data in (page.get('Versions', []) + page.get('DeleteMarkers', []))]
except is_boto3_error_code('NoSuchBucket'):
yield []
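# Usage sketch (hypothetical bucket name): both generators yield one list per page, so
# callers can delete in API-sized batches, e.g.
#   for key_version_pairs in paginated_versions_list(s3_client, Bucket='mys3bucket'):
#       ...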
def destroy_bucket(s3_client, module):
force = module.params.get("force")
name = module.params.get("name")
try:
bucket_is_present = bucket_exists(s3_client, name)
except EndpointConnectionError as e:
module.fail_json_aws(e, msg="Invalid endpoint provided: %s" % to_text(e))
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Failed to check bucket presence")
if not bucket_is_present:
module.exit_json(changed=False)
if force:
# if there are contents then we need to delete them (including versions) before we can delete the bucket
try:
for key_version_pairs in paginated_versions_list(s3_client, Bucket=name):
formatted_keys = [{'Key': key, 'VersionId': version} for key, version in key_version_pairs]
for fk in formatted_keys:
# remove VersionId from cases where they are `None` so that
# unversioned objects are deleted using `DeleteObject`
# rather than `DeleteObjectVersion`, improving backwards
# compatibility with older IAM policies.
if not fk.get('VersionId'):
fk.pop('VersionId')
if formatted_keys:
resp = s3_client.delete_objects(Bucket=name, Delete={'Objects': formatted_keys})
if resp.get('Errors'):
module.fail_json(
msg='Could not empty bucket before deleting. Could not delete objects: {0}'.format(
', '.join([k['Key'] for k in resp['Errors']])
),
errors=resp['Errors'], response=resp
)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Failed while deleting bucket")
try:
delete_bucket(s3_client, name)
s3_client.get_waiter('bucket_not_exists').wait(Bucket=name, WaiterConfig=dict(Delay=5, MaxAttempts=60))
except WaiterError as e:
module.fail_json_aws(e, msg='An error occurred waiting for the bucket to be deleted.')
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg="Failed to delete bucket")
module.exit_json(changed=True)
def is_fakes3(s3_url):
""" Return True if s3_url has scheme fakes3:// """
if s3_url is not None:
return urlparse(s3_url).scheme in ('fakes3', 'fakes3s')
else:
return False
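# For example, is_fakes3('fakes3://localhost:4567') and is_fakes3('fakes3s://localhost:4567')
# return True, while is_fakes3('https://nyc3.digitaloceanspaces.com') and is_fakes3(None) return False.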
def get_s3_client(module, aws_connect_kwargs, location, ceph, s3_url):
if s3_url and ceph: # TODO - test this
ceph = urlparse(s3_url)
params = dict(module=module, conn_type='client', resource='s3', use_ssl=ceph.scheme == 'https', region=location, endpoint=s3_url, **aws_connect_kwargs)
elif is_fakes3(s3_url):
fakes3 = urlparse(s3_url)
port = fakes3.port
if fakes3.scheme == 'fakes3s':
protocol = "https"
if port is None:
port = 443
else:
protocol = "http"
if port is None:
port = 80
params = dict(module=module, conn_type='client', resource='s3', region=location,
endpoint="%s://%s:%s" % (protocol, fakes3.hostname, to_text(port)),
use_ssl=fakes3.scheme == 'fakes3s', **aws_connect_kwargs)
else:
params = dict(module=module, conn_type='client', resource='s3', region=location, endpoint=s3_url, **aws_connect_kwargs)
return boto3_conn(**params)
def main():
argument_spec = dict(
force=dict(default=False, type='bool'),
policy=dict(type='json'),
name=dict(required=True),
requester_pays=dict(default=False, type='bool'),
s3_url=dict(aliases=['S3_URL']),
state=dict(default='present', choices=['present', 'absent']),
tags=dict(type='dict'),
purge_tags=dict(type='bool', default=True),
versioning=dict(type='bool'),
ceph=dict(default=False, type='bool'),
encryption=dict(choices=['none', 'AES256', 'aws:kms']),
encryption_key_id=dict()
)
required_by = dict(
encryption_key_id=('encryption',),
)
module = AnsibleAWSModule(
argument_spec=argument_spec, required_by=required_by
)
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
if region in ('us-east-1', '', None):
# default to US Standard region
location = 'us-east-1'
else:
# Boto uses symbolic names for locations but region strings will
# actually work fine for everything except us-east-1 (US Standard)
location = region
s3_url = module.params.get('s3_url')
ceph = module.params.get('ceph')
# allow eucarc environment variables to be used if ansible vars aren't set
if not s3_url and 'S3_URL' in os.environ:
s3_url = os.environ['S3_URL']
if ceph and not s3_url:
module.fail_json(msg='ceph flavour requires s3_url')
# Look at s3_url and tweak connection settings
# if connecting to Ceph RGW, Walrus or fakes3
if s3_url:
for key in ['validate_certs', 'security_token', 'profile_name']:
aws_connect_kwargs.pop(key, None)
s3_client = get_s3_client(module, aws_connect_kwargs, location, ceph, s3_url)
if s3_client is None: # this should never happen
module.fail_json(msg='Unknown error, failed to create s3 connection, no information from boto.')
state = module.params.get("state")
encryption = module.params.get("encryption")
encryption_key_id = module.params.get("encryption_key_id")
if not hasattr(s3_client, "get_bucket_encryption"):
if encryption is not None:
module.fail_json(msg="Using bucket encryption requires botocore version >= 1.7.41")
# Parameter validation
if encryption_key_id is not None and encryption != 'aws:kms':
module.fail_json(msg="Only 'aws:kms' is a valid option for encryption parameter when you specify encryption_key_id.")
if state == 'present':
create_or_update_bucket(s3_client, module, location)
elif state == 'absent':
destroy_bucket(s3_client, module)
if __name__ == '__main__':
main()

@ -1,71 +0,0 @@
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2018, Will Thames <will@thames.id.au>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.errors import AnsibleError, AnsibleAction, AnsibleActionFail, AnsibleFileNotFound
from ansible.module_utils._text import to_text
from ansible.plugins.action import ActionBase
from ansible.utils.vars import merge_hash
class ActionModule(ActionBase):
TRANSFERS_FILES = True
def run(self, tmp=None, task_vars=None):
''' handler for aws_s3 operations '''
self._supports_async = True
if task_vars is None:
task_vars = dict()
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
source = self._task.args.get('src', None)
try:
new_module_args = self._task.args.copy()
if source:
source = os.path.expanduser(source)
# For backward compatibility check if the file exists on the remote; it should take precedence
if not self._remote_file_exists(source):
try:
source = self._loader.get_real_file(self._find_needle('files', source), decrypt=False)
new_module_args['src'] = source
except AnsibleFileNotFound as e:
# module handles error message for nonexistent files
new_module_args['src'] = source
except AnsibleError as e:
raise AnsibleActionFail(to_text(e))
wrap_async = self._task.async_val and not self._connection.has_native_async
# execute the aws_s3 module with the updated args
result = merge_hash(result, self._execute_module(module_args=new_module_args, task_vars=task_vars, wrap_async=wrap_async))
if not wrap_async:
# remove a temporary path we created
self._remove_tmp_path(self._connection._shell.tmpdir)
except AnsibleAction as e:
result.update(e.result)
return result

@ -1,72 +0,0 @@
# (C) 2018 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
callback: aws_resource_actions
type: aggregate
short_description: summarizes all "resource:actions" completed
version_added: "2.8"
description:
- Ansible callback plugin for collecting the AWS actions completed by all boto3 modules using
AnsibleAWSModule in a playbook. Botocore endpoint logs need to be enabled for those modules, which can
be done easily by setting debug_botocore_endpoint_logs to True for group/aws using module_defaults.
requirements:
- whitelisting in configuration - see examples section below for details.
'''
EXAMPLES = '''
example: >
To enable, add this to your ansible.cfg file in the defaults block
[defaults]
callback_whitelist = aws_resource_actions
sample output: >
#
# AWS ACTIONS: ['s3:PutBucketAcl', 's3:HeadObject', 's3:DeleteObject', 's3:PutObjectAcl', 's3:CreateMultipartUpload',
# 's3:DeleteBucket', 's3:GetObject', 's3:DeleteObjects', 's3:CreateBucket', 's3:CompleteMultipartUpload',
# 's3:ListObjectsV2', 's3:HeadBucket', 's3:UploadPart', 's3:PutObject']
#
sample output: >
#
# AWS ACTIONS: ['ec2:DescribeVpcAttribute', 'ec2:DescribeVpcClassicLink', 'ec2:ModifyVpcAttribute', 'ec2:CreateTags',
# 'sts:GetCallerIdentity', 'ec2:DescribeSecurityGroups', 'ec2:DescribeTags', 'ec2:DescribeVpcs', 'ec2:CreateVpc']
#
'''
from ansible.plugins.callback import CallbackBase
from ansible.module_utils._text import to_native
class CallbackModule(CallbackBase):
CALLBACK_VERSION = 2.8
CALLBACK_TYPE = 'aggregate'
CALLBACK_NAME = 'aws_resource_actions'
CALLBACK_NEEDS_WHITELIST = True
def __init__(self):
self.aws_resource_actions = []
super(CallbackModule, self).__init__()
def extend_aws_resource_actions(self, result):
if result.get('resource_actions'):
self.aws_resource_actions.extend(result['resource_actions'])
def runner_on_ok(self, host, res):
self.extend_aws_resource_actions(res)
def runner_on_failed(self, host, res, ignore_errors=False):
self.extend_aws_resource_actions(res)
def v2_runner_item_on_ok(self, result):
self.extend_aws_resource_actions(result._result)
def v2_runner_item_on_failed(self, result):
self.extend_aws_resource_actions(result._result)
def playbook_on_stats(self, stats):
if self.aws_resource_actions:
self.aws_resource_actions = sorted(list(to_native(action) for action in set(self.aws_resource_actions)))
self._display.display("AWS ACTIONS: {0}".format(self.aws_resource_actions))
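# Example end-of-run summary (hypothetical actions): duplicates collected across tasks are
# de-duplicated via set() and sorted above, so the final line looks like
# AWS ACTIONS: ['ec2:DescribeSubnets', 's3:CreateBucket', 'sts:GetCallerIdentity']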

@ -1,75 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright: (c) 2014, Will Thames <will@thames.id.au>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
class ModuleDocFragment(object):
# AWS only documentation fragment
DOCUMENTATION = r'''
options:
debug_botocore_endpoint_logs:
description:
      - Use a botocore.endpoint logger to parse the unique (rather than total) "resource:action" API calls made during a task, outputting
        the set to the resource_actions key in the task results. Use the aws_resource_actions callback to output the total list made during
        a playbook. The ANSIBLE_DEBUG_BOTOCORE_LOGS environment variable may also be used.
type: bool
default: 'no'
version_added: "2.8"
ec2_url:
description:
- Url to use to connect to EC2 or your Eucalyptus cloud (by default the module will use EC2 endpoints).
Ignored for modules where region is required. Must be specified for all other modules if region is not used.
If not set then the value of the EC2_URL environment variable, if any, is used.
type: str
aws_secret_key:
description:
- AWS secret key. If not set then the value of the AWS_SECRET_ACCESS_KEY, AWS_SECRET_KEY, or EC2_SECRET_KEY environment variable is used.
type: str
aliases: [ ec2_secret_key, secret_key ]
aws_access_key:
description:
- AWS access key. If not set then the value of the AWS_ACCESS_KEY_ID, AWS_ACCESS_KEY or EC2_ACCESS_KEY environment variable is used.
type: str
aliases: [ ec2_access_key, access_key ]
security_token:
description:
- AWS STS security token. If not set then the value of the AWS_SECURITY_TOKEN or EC2_SECURITY_TOKEN environment variable is used.
type: str
aliases: [ access_token ]
version_added: "1.6"
validate_certs:
description:
- When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
type: bool
default: yes
version_added: "1.5"
profile:
description:
- Uses a boto profile. Only works with boto >= 2.24.0.
type: str
version_added: "1.6"
aws_config:
description:
- A dictionary to modify the botocore configuration.
- Parameters can be found at U(https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html#botocore.config.Config).
- Only the 'user_agent' key is used for boto modules. See U(http://boto.cloudhackers.com/en/latest/boto_config_tut.html#boto) for more boto configuration.
type: dict
version_added: "2.10"
requirements:
- python >= 2.6
- boto
notes:
- If parameters are not set within the module, the following
environment variables can be used in decreasing order of precedence
C(AWS_URL) or C(EC2_URL),
C(AWS_ACCESS_KEY_ID) or C(AWS_ACCESS_KEY) or C(EC2_ACCESS_KEY),
C(AWS_SECRET_ACCESS_KEY) or C(AWS_SECRET_KEY) or C(EC2_SECRET_KEY),
C(AWS_SECURITY_TOKEN) or C(EC2_SECURITY_TOKEN),
C(AWS_REGION) or C(EC2_REGION)
- Ansible uses the boto configuration file (typically ~/.boto) if no
credentials are provided. See https://boto.readthedocs.io/en/latest/boto_config_tut.html
  - C(AWS_REGION) or C(EC2_REGION) can typically be used to specify the
    AWS region when required, but this can also be configured in the boto config file
'''

@ -1,42 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
class ModuleDocFragment(object):
# Plugin options for AWS credentials
DOCUMENTATION = r'''
options:
aws_profile:
description: The AWS profile
type: str
aliases: [ boto_profile ]
env:
- name: AWS_DEFAULT_PROFILE
- name: AWS_PROFILE
aws_access_key:
description: The AWS access key to use.
type: str
aliases: [ aws_access_key_id ]
env:
- name: EC2_ACCESS_KEY
- name: AWS_ACCESS_KEY
- name: AWS_ACCESS_KEY_ID
aws_secret_key:
description: The AWS secret key that corresponds to the access key.
type: str
aliases: [ aws_secret_access_key ]
env:
- name: EC2_SECRET_KEY
- name: AWS_SECRET_KEY
- name: AWS_SECRET_ACCESS_KEY
aws_security_token:
description: The AWS security token if using temporary access and secret keys.
type: str
env:
- name: EC2_SECURITY_TOKEN
- name: AWS_SESSION_TOKEN
- name: AWS_SECURITY_TOKEN
'''

@ -1,18 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
class ModuleDocFragment(object):
# Plugin option for AWS region
DOCUMENTATION = r'''
options:
region:
description: The region for which to create the connection.
type: str
env:
- name: EC2_REGION
- name: AWS_REGION
'''

@ -1,18 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Ansible, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
class ModuleDocFragment(object):
# EC2 only documentation fragment
DOCUMENTATION = r'''
options:
region:
description:
- The AWS region to use. If not specified then the value of the AWS_REGION or EC2_REGION environment variable, if any, is used.
See U(http://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region)
type: str
aliases: [ aws_region, ec2_region ]
'''

@ -1,659 +0,0 @@
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: aws_ec2
plugin_type: inventory
short_description: EC2 inventory source
requirements:
- boto3
- botocore
extends_documentation_fragment:
- inventory_cache
- constructed
- aws_credentials
description:
- Get inventory hosts from Amazon Web Services EC2.
- Uses a YAML configuration file that ends with C(aws_ec2.(yml|yaml)).
notes:
- If no credentials are provided and the control node has an associated IAM instance profile then the
role will be used for authentication.
author:
- Sloane Hertel (@s-hertel)
options:
plugin:
description: Token that ensures this is a source file for the plugin.
required: True
choices: ['aws_ec2']
iam_role_arn:
description: The ARN of the IAM role to assume to perform the inventory lookup. You should still provide AWS
credentials with enough privilege to perform the AssumeRole action.
version_added: '2.9'
regions:
description:
- A list of regions in which to describe EC2 instances.
        - If empty (the default), this will include all regions, except possibly restricted ones like us-gov-west-1 and cn-north-1.
type: list
default: []
hostnames:
description:
- A list in order of precedence for hostname variables.
- You can use the options specified in U(http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html#options).
- To use tags as hostnames use the syntax tag:Name=Value to use the hostname Name_Value, or tag:Name to use the value of the Name tag.
type: list
default: []
filters:
description:
- A dictionary of filter value pairs.
- Available filters are listed here U(http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html#options).
type: dict
default: {}
include_extra_api_calls:
description:
- Add two additional API calls for every instance to include 'persistent' and 'events' host variables.
- Spot instances may be persistent and instances may have associated events.
type: bool
default: False
version_added: '2.8'
strict_permissions:
description:
- By default if a 403 (Forbidden) error code is encountered this plugin will fail.
- You can set this option to False in the inventory config file which will allow 403 errors to be gracefully skipped.
type: bool
default: True
use_contrib_script_compatible_sanitization:
description:
- By default this plugin uses a general group name sanitization to create safe and usable group names for use in Ansible.
This option allows you to override that, in an effort to ease migration from the old inventory script, and
matches the sanitization of groups when the script's ``replace_dash_in_groups`` option is set to ``False``.
To replicate behavior of ``replace_dash_in_groups = True`` with constructed groups,
you will need to replace hyphens with underscores via the regex_replace filter for those entries.
- For this to work you should also turn off the TRANSFORM_INVALID_GROUP_CHARS setting,
otherwise the core engine will just use the standard sanitization on top.
- This is not the default because such names can break certain functionality: group names end up
being used as Python identifiers, and not all characters are valid in them.
type: bool
default: False
version_added: '2.8'
'''
EXAMPLES = '''
# Minimal example using environment vars or instance role credentials
# Fetch all hosts in us-east-1, the hostname is the public DNS if it exists, otherwise the private IP address
plugin: aws_ec2
regions:
- us-east-1
# Example using filters, ignoring permission errors, and specifying the hostname precedence
plugin: aws_ec2
boto_profile: aws_profile
# Populate inventory with instances in these regions
regions:
- us-east-1
- us-east-2
filters:
# All instances with their `Environment` tag set to `dev`
tag:Environment: dev
# All dev and QA hosts
tag:Environment:
- dev
- qa
instance.group-id: sg-xxxxxxxx
# Ignores 403 errors rather than failing
strict_permissions: False
# Note: I(hostnames) sets the inventory_hostname. To modify ansible_host without modifying
# inventory_hostname use compose (see example below).
hostnames:
- tag:Name=Tag1,Name=Tag2 # Return specific hosts only
- tag:CustomDNSName
- dns-name
- private-ip-address
# Example using constructed features to create groups and set ansible_host
plugin: aws_ec2
regions:
- us-east-1
- us-west-1
# keyed_groups may be used to create custom groups
strict: False
keyed_groups:
# Add e.g. x86_64 hosts to an arch_x86_64 group
- prefix: arch
key: 'architecture'
# Add hosts to tag_Name_Value groups for each Name/Value tag pair
- prefix: tag
key: tags
# Add hosts to e.g. instance_type_z3_tiny
- prefix: instance_type
key: instance_type
# Create security_groups_sg_abcd1234 group for each SG
- key: 'security_groups|json_query("[].group_id")'
prefix: 'security_groups'
# Create a group for each value of the Application tag
- key: tags.Application
separator: ''
# Create a group per region e.g. aws_region_us_east_2
- key: placement.region
prefix: aws_region
# Create a group (or groups) based on the value of a custom tag "Role" and add them to a metagroup called "project"
- key: tags['Role']
prefix: foo
parent_group: "project"
# Set individual variables with compose
compose:
# Use the private IP address to connect to the host
# (note: this does not modify inventory_hostname, which is set via I(hostnames))
ansible_host: private_ip_address
'''
import re
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.ec2 import ansible_dict_to_boto3_filter_list, boto3_tag_list_to_ansible_dict
from ansible.module_utils.ec2 import camel_dict_to_snake_dict
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
from ansible.utils.display import Display
try:
import boto3
import botocore
except ImportError:
raise AnsibleError('The ec2 dynamic inventory plugin requires boto3 and botocore.')
display = Display()
# The mappings give an array of keys to get from the filter name to the value
# returned by boto3's EC2 describe_instances method.
instance_meta_filter_to_boto_attr = {
'group-id': ('Groups', 'GroupId'),
'group-name': ('Groups', 'GroupName'),
'network-interface.attachment.instance-owner-id': ('OwnerId',),
'owner-id': ('OwnerId',),
'requester-id': ('RequesterId',),
'reservation-id': ('ReservationId',),
}
instance_data_filter_to_boto_attr = {
'affinity': ('Placement', 'Affinity'),
'architecture': ('Architecture',),
'availability-zone': ('Placement', 'AvailabilityZone'),
'block-device-mapping.attach-time': ('BlockDeviceMappings', 'Ebs', 'AttachTime'),
'block-device-mapping.delete-on-termination': ('BlockDeviceMappings', 'Ebs', 'DeleteOnTermination'),
'block-device-mapping.device-name': ('BlockDeviceMappings', 'DeviceName'),
'block-device-mapping.status': ('BlockDeviceMappings', 'Ebs', 'Status'),
'block-device-mapping.volume-id': ('BlockDeviceMappings', 'Ebs', 'VolumeId'),
'client-token': ('ClientToken',),
'dns-name': ('PublicDnsName',),
'host-id': ('Placement', 'HostId'),
'hypervisor': ('Hypervisor',),
'iam-instance-profile.arn': ('IamInstanceProfile', 'Arn'),
'image-id': ('ImageId',),
'instance-id': ('InstanceId',),
'instance-lifecycle': ('InstanceLifecycle',),
'instance-state-code': ('State', 'Code'),
'instance-state-name': ('State', 'Name'),
'instance-type': ('InstanceType',),
'instance.group-id': ('SecurityGroups', 'GroupId'),
'instance.group-name': ('SecurityGroups', 'GroupName'),
'ip-address': ('PublicIpAddress',),
'kernel-id': ('KernelId',),
'key-name': ('KeyName',),
'launch-index': ('AmiLaunchIndex',),
'launch-time': ('LaunchTime',),
'monitoring-state': ('Monitoring', 'State'),
'network-interface.addresses.private-ip-address': ('NetworkInterfaces', 'PrivateIpAddress'),
'network-interface.addresses.primary': ('NetworkInterfaces', 'PrivateIpAddresses', 'Primary'),
'network-interface.addresses.association.public-ip': ('NetworkInterfaces', 'PrivateIpAddresses', 'Association', 'PublicIp'),
'network-interface.addresses.association.ip-owner-id': ('NetworkInterfaces', 'PrivateIpAddresses', 'Association', 'IpOwnerId'),
'network-interface.association.public-ip': ('NetworkInterfaces', 'Association', 'PublicIp'),
'network-interface.association.ip-owner-id': ('NetworkInterfaces', 'Association', 'IpOwnerId'),
'network-interface.association.allocation-id': ('ElasticGpuAssociations', 'ElasticGpuId'),
'network-interface.association.association-id': ('ElasticGpuAssociations', 'ElasticGpuAssociationId'),
'network-interface.attachment.attachment-id': ('NetworkInterfaces', 'Attachment', 'AttachmentId'),
'network-interface.attachment.instance-id': ('InstanceId',),
'network-interface.attachment.device-index': ('NetworkInterfaces', 'Attachment', 'DeviceIndex'),
'network-interface.attachment.status': ('NetworkInterfaces', 'Attachment', 'Status'),
'network-interface.attachment.attach-time': ('NetworkInterfaces', 'Attachment', 'AttachTime'),
'network-interface.attachment.delete-on-termination': ('NetworkInterfaces', 'Attachment', 'DeleteOnTermination'),
'network-interface.availability-zone': ('Placement', 'AvailabilityZone'),
'network-interface.description': ('NetworkInterfaces', 'Description'),
'network-interface.group-id': ('NetworkInterfaces', 'Groups', 'GroupId'),
'network-interface.group-name': ('NetworkInterfaces', 'Groups', 'GroupName'),
'network-interface.ipv6-addresses.ipv6-address': ('NetworkInterfaces', 'Ipv6Addresses', 'Ipv6Address'),
'network-interface.mac-address': ('NetworkInterfaces', 'MacAddress'),
'network-interface.network-interface-id': ('NetworkInterfaces', 'NetworkInterfaceId'),
'network-interface.owner-id': ('NetworkInterfaces', 'OwnerId'),
'network-interface.private-dns-name': ('NetworkInterfaces', 'PrivateDnsName'),
# 'network-interface.requester-id': (),
'network-interface.requester-managed': ('NetworkInterfaces', 'Association', 'IpOwnerId'),
'network-interface.status': ('NetworkInterfaces', 'Status'),
'network-interface.source-dest-check': ('NetworkInterfaces', 'SourceDestCheck'),
'network-interface.subnet-id': ('NetworkInterfaces', 'SubnetId'),
'network-interface.vpc-id': ('NetworkInterfaces', 'VpcId'),
'placement-group-name': ('Placement', 'GroupName'),
'platform': ('Platform',),
'private-dns-name': ('PrivateDnsName',),
'private-ip-address': ('PrivateIpAddress',),
'product-code': ('ProductCodes', 'ProductCodeId'),
'product-code.type': ('ProductCodes', 'ProductCodeType'),
'ramdisk-id': ('RamdiskId',),
'reason': ('StateTransitionReason',),
'root-device-name': ('RootDeviceName',),
'root-device-type': ('RootDeviceType',),
'source-dest-check': ('SourceDestCheck',),
'spot-instance-request-id': ('SpotInstanceRequestId',),
'state-reason-code': ('StateReason', 'Code'),
'state-reason-message': ('StateReason', 'Message'),
'subnet-id': ('SubnetId',),
'tag': ('Tags',),
'tag-key': ('Tags',),
'tag-value': ('Tags',),
'tenancy': ('Placement', 'Tenancy'),
'virtualization-type': ('VirtualizationType',),
'vpc-id': ('VpcId',),
}
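# Illustrative walk-through (instance values assumed, not from a real API call):
# the filter 'instance-state-name' maps to the key chain ('State', 'Name'), so
# given instance = {'State': {'Code': 16, 'Name': 'running'}} the chain resolves
# instance['State'] -> {'Code': 16, 'Name': 'running'} and then ['Name'] -> 'running'.
# List-valued attributes such as 'instance.group-id' are flattened across every
# list element by _compile_values() below.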
class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
NAME = 'aws_ec2'
def __init__(self):
super(InventoryModule, self).__init__()
self.group_prefix = 'aws_ec2_'
# credentials
self.boto_profile = None
self.aws_secret_access_key = None
self.aws_access_key_id = None
self.aws_security_token = None
self.iam_role_arn = None
def _compile_values(self, obj, attr):
'''
:param obj: A list or dict of instance attributes
:param attr: A key
:return The value(s) found via the attr
'''
if obj is None:
return
temp_obj = []
if isinstance(obj, list) or isinstance(obj, tuple):
for each in obj:
value = self._compile_values(each, attr)
if value:
temp_obj.append(value)
else:
temp_obj = obj.get(attr)
has_indexes = any([isinstance(temp_obj, list), isinstance(temp_obj, tuple)])
if has_indexes and len(temp_obj) == 1:
return temp_obj[0]
return temp_obj
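# Sketch with assumed data: _compile_values([{'GroupId': 'sg-1'}, {'GroupId': 'sg-2'}], 'GroupId')
# returns ['sg-1', 'sg-2'], while a single-element list collapses to the bare value 'sg-1'.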
def _get_boto_attr_chain(self, filter_name, instance):
'''
:param filter_name: The filter
:param instance: instance dict returned by boto3 ec2 describe_instances()
'''
allowed_filters = sorted(list(instance_data_filter_to_boto_attr.keys()) + list(instance_meta_filter_to_boto_attr.keys()))
if filter_name not in allowed_filters:
raise AnsibleError("Invalid filter '%s' provided; filter must be one of %s." % (filter_name,
allowed_filters))
if filter_name in instance_data_filter_to_boto_attr:
boto_attr_list = instance_data_filter_to_boto_attr[filter_name]
else:
boto_attr_list = instance_meta_filter_to_boto_attr[filter_name]
instance_value = instance
for attribute in boto_attr_list:
instance_value = self._compile_values(instance_value, attribute)
return instance_value
def _get_credentials(self):
'''
:return A dictionary of boto client credentials
'''
boto_params = {}
for credential in (('aws_access_key_id', self.aws_access_key_id),
('aws_secret_access_key', self.aws_secret_access_key),
('aws_session_token', self.aws_security_token)):
if credential[1]:
boto_params[credential[0]] = credential[1]
return boto_params
def _get_connection(self, credentials, region='us-east-1'):
try:
connection = boto3.session.Session(profile_name=self.boto_profile).client('ec2', region, **credentials)
except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
if self.boto_profile:
try:
connection = boto3.session.Session(profile_name=self.boto_profile).client('ec2', region)
except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
else:
raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
return connection
def _boto3_assume_role(self, credentials, region):
"""
Assume an IAM role passed by iam_role_arn parameter
:return: a dict containing the credentials of the assumed role
"""
iam_role_arn = self.iam_role_arn
try:
sts_connection = boto3.session.Session(profile_name=self.boto_profile).client('sts', region, **credentials)
sts_session = sts_connection.assume_role(RoleArn=iam_role_arn, RoleSessionName='ansible_aws_ec2_dynamic_inventory')
return dict(
aws_access_key_id=sts_session['Credentials']['AccessKeyId'],
aws_secret_access_key=sts_session['Credentials']['SecretAccessKey'],
aws_session_token=sts_session['Credentials']['SessionToken']
)
except botocore.exceptions.ClientError as e:
raise AnsibleError("Unable to assume IAM role: %s" % to_native(e))
def _boto3_conn(self, regions):
'''
:param regions: a list of regions in which to create boto3 clients
Generator that yields a boto3 client and the region
'''
credentials = self._get_credentials()
iam_role_arn = self.iam_role_arn
if not regions:
try:
# as per https://boto3.amazonaws.com/v1/documentation/api/latest/guide/ec2-example-regions-avail-zones.html
client = self._get_connection(credentials)
resp = client.describe_regions()
regions = [x['RegionName'] for x in resp.get('Regions', [])]
except botocore.exceptions.NoRegionError:
# above seems to fail depending on boto3 version, ignore it and try something else
pass
# fallback to local list hardcoded in boto3 if still no regions
if not regions:
session = boto3.Session()
regions = session.get_available_regions('ec2')
# I give up, now you MUST give me regions
if not regions:
raise AnsibleError('Unable to get regions list from available methods, you must specify the "regions" option to continue.')
for region in regions:
connection = self._get_connection(credentials, region)
try:
if iam_role_arn is not None:
assumed_credentials = self._boto3_assume_role(credentials, region)
else:
assumed_credentials = credentials
connection = boto3.session.Session(profile_name=self.boto_profile).client('ec2', region, **assumed_credentials)
except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
if self.boto_profile:
try:
connection = boto3.session.Session(profile_name=self.boto_profile).client('ec2', region)
except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
else:
raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
yield connection, region
def _get_instances_by_region(self, regions, filters, strict_permissions):
'''
:param regions: a list of regions in which to describe instances
:param filters: a list of boto3 filter dictionaries
:param strict_permissions: a boolean determining whether to fail or ignore 403 error codes
:return A list of instance dictionaries
'''
all_instances = []
for connection, region in self._boto3_conn(regions):
try:
# By default find non-terminated/terminating instances
if not any([f['Name'] == 'instance-state-name' for f in filters]):
filters.append({'Name': 'instance-state-name', 'Values': ['running', 'pending', 'stopping', 'stopped']})
paginator = connection.get_paginator('describe_instances')
reservations = paginator.paginate(Filters=filters).build_full_result().get('Reservations')
instances = []
for r in reservations:
new_instances = r['Instances']
for instance in new_instances:
instance.update(self._get_reservation_details(r))
if self.get_option('include_extra_api_calls'):
instance.update(self._get_event_set_and_persistence(connection, instance['InstanceId'], instance.get('SpotInstanceRequestId')))
instances.extend(new_instances)
except botocore.exceptions.ClientError as e:
if e.response['ResponseMetadata']['HTTPStatusCode'] == 403 and not strict_permissions:
instances = []
else:
raise AnsibleError("Failed to describe instances: %s" % to_native(e))
except botocore.exceptions.BotoCoreError as e:
raise AnsibleError("Failed to describe instances: %s" % to_native(e))
all_instances.extend(instances)
return sorted(all_instances, key=lambda x: x['InstanceId'])
def _get_reservation_details(self, reservation):
return {
'OwnerId': reservation['OwnerId'],
'RequesterId': reservation.get('RequesterId', ''),
'ReservationId': reservation['ReservationId']
}
def _get_event_set_and_persistence(self, connection, instance_id, spot_instance):
host_vars = {'Events': '', 'Persistent': False}
try:
kwargs = {'InstanceIds': [instance_id]}
host_vars['Events'] = connection.describe_instance_status(**kwargs)['InstanceStatuses'][0].get('Events', '')
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
if not self.get_option('strict_permissions'):
pass
else:
raise AnsibleError("Failed to describe instance status: %s" % to_native(e))
if spot_instance:
try:
kwargs = {'SpotInstanceRequestIds': [spot_instance]}
host_vars['Persistent'] = bool(
connection.describe_spot_instance_requests(**kwargs)['SpotInstanceRequests'][0].get('Type') == 'persistent'
)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
if not self.get_option('strict_permissions'):
pass
else:
raise AnsibleError("Failed to describe spot instance requests: %s" % to_native(e))
return host_vars
def _get_tag_hostname(self, preference, instance):
tag_hostnames = preference.split('tag:', 1)[1]
if ',' in tag_hostnames:
tag_hostnames = tag_hostnames.split(',')
else:
tag_hostnames = [tag_hostnames]
tags = boto3_tag_list_to_ansible_dict(instance.get('Tags', []))
for v in tag_hostnames:
if '=' in v:
tag_name, tag_value = v.split('=')
if tags.get(tag_name) == tag_value:
return to_text(tag_name) + "_" + to_text(tag_value)
else:
tag_value = tags.get(v)
if tag_value:
return to_text(tag_value)
return None
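# Hostname-preference sketch (tag names assumed): 'tag:Name=web' matches only
# instances whose Name tag equals 'web' and yields the hostname 'Name_web',
# while plain 'tag:Name' yields the tag's value for any instance carrying it.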
def _get_hostname(self, instance, hostnames):
'''
:param instance: an instance dict returned by boto3 ec2 describe_instances()
:param hostnames: a list of hostname destination variables in order of preference
:return the preferred identifier for the host
'''
if not hostnames:
hostnames = ['dns-name', 'private-dns-name']
hostname = None
for preference in hostnames:
if 'tag' in preference:
if not preference.startswith('tag:'):
raise AnsibleError("To name a host by tags name_value, use 'tag:name=value'.")
hostname = self._get_tag_hostname(preference, instance)
else:
hostname = self._get_boto_attr_chain(preference, instance)
if hostname:
break
if hostname:
if ':' in to_text(hostname):
return self._sanitize_group_name((to_text(hostname)))
else:
return to_text(hostname)
def _query(self, regions, filters, strict_permissions):
'''
:param regions: a list of regions to query
:param filters: a list of boto3 filter dictionaries
:param strict_permissions: a boolean determining whether to fail or ignore 403 error codes
'''
return {'aws_ec2': self._get_instances_by_region(regions, filters, strict_permissions)}
def _populate(self, groups, hostnames):
for group in groups:
group = self.inventory.add_group(group)
self._add_hosts(hosts=groups[group], group=group, hostnames=hostnames)
self.inventory.add_child('all', group)
def _add_hosts(self, hosts, group, hostnames):
'''
:param hosts: a list of hosts to be added to a group
:param group: the name of the group to which the hosts belong
:param hostnames: a list of hostname destination variables in order of preference
'''
for host in hosts:
hostname = self._get_hostname(host, hostnames)
host = camel_dict_to_snake_dict(host, ignore_list=['Tags'])
host['tags'] = boto3_tag_list_to_ansible_dict(host.get('tags', []))
# Allow easier grouping by region
host['placement']['region'] = host['placement']['availability_zone'][:-1]
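# e.g. an availability zone of 'us-east-1a' yields the region 'us-east-1'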
if not hostname:
continue
self.inventory.add_host(hostname, group=group)
for hostvar, hostval in host.items():
self.inventory.set_variable(hostname, hostvar, hostval)
# Use constructed if applicable
strict = self.get_option('strict')
# Composed variables
self._set_composite_vars(self.get_option('compose'), host, hostname, strict=strict)
# Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
self._add_host_to_composed_groups(self.get_option('groups'), host, hostname, strict=strict)
# Create groups based on variable values and add the corresponding hosts to it
self._add_host_to_keyed_groups(self.get_option('keyed_groups'), host, hostname, strict=strict)
def _set_credentials(self):
'''
Set credentials from the inventory configuration, falling back to the active botocore session.
'''
self.boto_profile = self.get_option('aws_profile')
self.aws_access_key_id = self.get_option('aws_access_key')
self.aws_secret_access_key = self.get_option('aws_secret_key')
self.aws_security_token = self.get_option('aws_security_token')
self.iam_role_arn = self.get_option('iam_role_arn')
if not self.boto_profile and not (self.aws_access_key_id and self.aws_secret_access_key):
session = botocore.session.get_session()
try:
credentials = session.get_credentials().get_frozen_credentials()
except AttributeError:
pass
else:
self.aws_access_key_id = credentials.access_key
self.aws_secret_access_key = credentials.secret_key
self.aws_security_token = credentials.token
if not self.boto_profile and not (self.aws_access_key_id and self.aws_secret_access_key):
raise AnsibleError("Insufficient boto credentials found. Please provide them in your "
"inventory configuration file or set them as environment variables.")
def verify_file(self, path):
'''
:param path: the path to the inventory config file
:return True if the file name ends with 'aws_ec2.yml' or 'aws_ec2.yaml'
'''
if super(InventoryModule, self).verify_file(path):
if path.endswith(('aws_ec2.yml', 'aws_ec2.yaml')):
return True
display.debug("aws_ec2 inventory filename must end with 'aws_ec2.yml' or 'aws_ec2.yaml'")
return False
def parse(self, inventory, loader, path, cache=True):
super(InventoryModule, self).parse(inventory, loader, path)
self._read_config_data(path)
if self.get_option('use_contrib_script_compatible_sanitization'):
self._sanitize_group_name = self._legacy_script_compatible_group_sanitization
self._set_credentials()
# get user specifications
regions = self.get_option('regions')
filters = ansible_dict_to_boto3_filter_list(self.get_option('filters'))
hostnames = self.get_option('hostnames')
strict_permissions = self.get_option('strict_permissions')
cache_key = self.get_cache_key(path)
# false when refresh_cache or --flush-cache is used
if cache:
# get the user-specified directive
cache = self.get_option('cache')
# Generate inventory
cache_needs_update = False
if cache:
try:
results = self._cache[cache_key]
except KeyError:
# if cache expires or cache file doesn't exist
cache_needs_update = True
if not cache or cache_needs_update:
results = self._query(regions, filters, strict_permissions)
self._populate(results, hostnames)
# If the cache has expired/doesn't exist or if refresh_inventory/flush cache is used
# when the user is using caching, update the cached inventory
if cache_needs_update or (not cache and self.get_option('cache')):
self._cache[cache_key] = results
@staticmethod
def _legacy_script_compatible_group_sanitization(name):
# note that while this mirrors what the script used to do, it has many issues with unicode and usability in python
regex = re.compile(r"[^A-Za-z0-9\_\-]")
return regex.sub('_', name)
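# Illustrative comparison (group name assumed): this legacy sanitization turns
# 'web-servers.prod' into 'web-servers_prod' (hyphens survive), whereas the
# default core sanitization would also replace the hyphen.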

@ -1,326 +0,0 @@
# Copyright (c) 2018 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: aws_rds
plugin_type: inventory
short_description: rds instance source
description:
- Get instances and clusters from Amazon Web Services RDS.
- Uses a YAML configuration file that ends with C(aws_rds.(yml|yaml)).
options:
regions:
description: A list of regions in which to describe RDS instances and clusters. Available regions are listed here
U(https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html)
default: []
filters:
description: A dictionary of filter value pairs. Available filters are listed here
U(https://docs.aws.amazon.com/cli/latest/reference/rds/describe-db-instances.html#options). If you filter by
db-cluster-id and I(include_clusters) is True it will apply to clusters as well.
default: {}
strict_permissions:
description: By default if an AccessDenied exception is encountered this plugin will fail. You can set strict_permissions to
False in the inventory config file, which will allow such errors to be gracefully skipped.
type: bool
default: True
include_clusters:
description: Whether or not to query for Aurora clusters as well as instances.
type: bool
default: False
statuses:
description: A list of desired states for instances/clusters to be added to inventory. Set to ['all'] as a shorthand to find everything.
type: list
default:
- creating
- available
extends_documentation_fragment:
- inventory_cache
- constructed
- aws_credentials
requirements:
- boto3
- botocore
author: Sloane Hertel (@s-hertel)
'''
EXAMPLES = '''
plugin: aws_rds
regions:
- us-east-1
- ca-central-1
keyed_groups:
- key: 'db_parameter_groups|json_query("[].db_parameter_group_name")'
prefix: rds_parameter_group
- key: engine
prefix: rds
- key: tags
- key: region
'''
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_native
from ansible.module_utils.aws.core import is_boto3_error_code
from ansible.module_utils.ec2 import ansible_dict_to_boto3_filter_list, boto3_tag_list_to_ansible_dict
from ansible.module_utils.ec2 import camel_dict_to_snake_dict
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
try:
import boto3
import botocore
except ImportError:
raise AnsibleError('The RDS dynamic inventory plugin requires boto3 and botocore.')
class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
NAME = 'aws_rds'
def __init__(self):
super(InventoryModule, self).__init__()
self.credentials = {}
self.boto_profile = None
def _boto3_conn(self, regions):
'''
:param regions: a list of regions in which to create boto3 clients
Generator that yields a boto3 client and the region
'''
for region in regions:
try:
connection = boto3.session.Session(profile_name=self.boto_profile).client('rds', region, **self.credentials)
except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
if self.boto_profile:
try:
connection = boto3.session.Session(profile_name=self.boto_profile).client('rds', region)
except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
else:
raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
yield connection, region
def _get_hosts_by_region(self, connection, filters, strict):
def _add_tags_for_hosts(connection, hosts, strict):
for host in hosts:
if 'DBInstanceArn' in host:
resource_arn = host['DBInstanceArn']
else:
resource_arn = host['DBClusterArn']
try:
tags = connection.list_tags_for_resource(ResourceName=resource_arn)['TagList']
except is_boto3_error_code('AccessDenied') as e:
if not strict:
tags = []
else:
raise e
host['Tags'] = tags
def wrapper(f, *args, **kwargs):
try:
results = f(*args, **kwargs)
if 'DBInstances' in results:
results = results['DBInstances']
else:
results = results['DBClusters']
_add_tags_for_hosts(connection, results, strict)
except is_boto3_error_code('AccessDenied') as e: # pylint: disable=duplicate-except
if not strict:
results = []
else:
raise AnsibleError("Failed to query RDS: {0}".format(to_native(e)))
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
raise AnsibleError("Failed to query RDS: {0}".format(to_native(e)))
return results
return wrapper
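# The closure above is invoked in two steps, e.g. (sketch of the calls below):
#   fetch = self._get_hosts_by_region(connection, filters, strict)
#   hosts = fetch(paginator.paginate(Filters=filters).build_full_result)
# wrapper() runs the boto3 callable it is handed, picks 'DBInstances' or
# 'DBClusters' out of the response, and tags the resulting hosts.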
def _get_all_hosts(self, regions, instance_filters, cluster_filters, strict, statuses, gather_clusters=False):
'''
:param regions: a list of regions in which to describe hosts
:param instance_filters: a list of boto3 filter dictionaries
:param cluster_filters: a list of boto3 filter dictionaries
:param strict: a boolean determining whether to fail or ignore 403 error codes
:param statuses: a list of statuses that the returned hosts should match
:return A list of host dictionaries
'''
all_instances = []
all_clusters = []
for connection, region in self._boto3_conn(regions):
paginator = connection.get_paginator('describe_db_instances')
all_instances.extend(
self._get_hosts_by_region(connection, instance_filters, strict)
(paginator.paginate(Filters=instance_filters).build_full_result)
)
if gather_clusters:
all_clusters.extend(
self._get_hosts_by_region(connection, cluster_filters, strict)
(connection.describe_db_clusters, **{'Filters': cluster_filters})
)
sorted_hosts = list(
sorted(all_instances, key=lambda x: x['DBInstanceIdentifier']) +
sorted(all_clusters, key=lambda x: x['DBClusterIdentifier'])
)
return self.find_hosts_with_valid_statuses(sorted_hosts, statuses)
def find_hosts_with_valid_statuses(self, hosts, statuses):
if 'all' in statuses:
return hosts
valid_hosts = []
for host in hosts:
if host.get('DBInstanceStatus') in statuses:
valid_hosts.append(host)
elif host.get('Status') in statuses:
valid_hosts.append(host)
return valid_hosts
def _populate(self, hosts):
group = 'aws_rds'
self.inventory.add_group(group)
if hosts:
self._add_hosts(hosts=hosts, group=group)
self.inventory.add_child('all', group)
def _populate_from_source(self, source_data):
hostvars = source_data.pop('_meta', {}).get('hostvars', {})
for group in source_data:
if group == 'all':
continue
else:
self.inventory.add_group(group)
hosts = source_data[group].get('hosts', [])
for host in hosts:
self._populate_host_vars([host], hostvars.get(host, {}), group)
self.inventory.add_child('all', group)
def _get_hostname(self, host):
if host.get('DBInstanceIdentifier'):
return host['DBInstanceIdentifier']
else:
return host['DBClusterIdentifier']
def _format_inventory(self, hosts):
results = {'_meta': {'hostvars': {}}}
group = 'aws_rds'
results[group] = {'hosts': []}
for host in hosts:
hostname = self._get_hostname(host)
results[group]['hosts'].append(hostname)
h = self.inventory.get_host(hostname)
results['_meta']['hostvars'][h.name] = h.vars
return results
def _add_hosts(self, hosts, group):
'''
:param hosts: a list of hosts to be added to a group
:param group: the name of the group to which the hosts belong
'''
for host in hosts:
hostname = self._get_hostname(host)
host = camel_dict_to_snake_dict(host, ignore_list=['Tags'])
host['tags'] = boto3_tag_list_to_ansible_dict(host.get('tags', []))
# Allow easier grouping by region
if 'availability_zone' in host:
host['region'] = host['availability_zone'][:-1]
elif 'availability_zones' in host:
host['region'] = host['availability_zones'][0][:-1]
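# e.g. a cluster whose first availability zone is 'ca-central-1a' lands in region 'ca-central-1'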
self.inventory.add_host(hostname, group=group)
for hostvar, hostval in host.items():
self.inventory.set_variable(hostname, hostvar, hostval)
# Use constructed if applicable
strict = self.get_option('strict')
# Composed variables
self._set_composite_vars(self.get_option('compose'), host, hostname, strict=strict)
# Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
self._add_host_to_composed_groups(self.get_option('groups'), host, hostname, strict=strict)
# Create groups based on variable values and add the corresponding hosts to it
self._add_host_to_keyed_groups(self.get_option('keyed_groups'), host, hostname, strict=strict)
def _set_credentials(self):
'''
Set credentials from the inventory configuration, falling back to the active botocore session.
'''
self.boto_profile = self.get_option('aws_profile')
aws_access_key_id = self.get_option('aws_access_key')
aws_secret_access_key = self.get_option('aws_secret_key')
aws_security_token = self.get_option('aws_security_token')
if not self.boto_profile and not (aws_access_key_id and aws_secret_access_key):
session = botocore.session.get_session()
if session.get_credentials() is not None:
aws_access_key_id = session.get_credentials().access_key
aws_secret_access_key = session.get_credentials().secret_key
aws_security_token = session.get_credentials().token
if not self.boto_profile and not (aws_access_key_id and aws_secret_access_key):
raise AnsibleError("Insufficient boto credentials found. Please provide them in your "
"inventory configuration file or set them as environment variables.")
if aws_access_key_id:
self.credentials['aws_access_key_id'] = aws_access_key_id
if aws_secret_access_key:
self.credentials['aws_secret_access_key'] = aws_secret_access_key
if aws_security_token:
self.credentials['aws_session_token'] = aws_security_token
def verify_file(self, path):
'''
:param path: the path to the inventory config file
:return True if the file name ends with 'aws_rds.yml' or 'aws_rds.yaml'
'''
if super(InventoryModule, self).verify_file(path):
if path.endswith(('aws_rds.yml', 'aws_rds.yaml')):
return True
return False
def parse(self, inventory, loader, path, cache=True):
super(InventoryModule, self).parse(inventory, loader, path)
config_data = self._read_config_data(path)
self._set_credentials()
# get user specifications
regions = self.get_option('regions')
filters = self.get_option('filters')
strict_permissions = self.get_option('strict_permissions')
statuses = self.get_option('statuses')
include_clusters = self.get_option('include_clusters')
instance_filters = ansible_dict_to_boto3_filter_list(filters)
cluster_filters = []
if 'db-cluster-id' in filters and include_clusters:
cluster_filters = ansible_dict_to_boto3_filter_list({'db-cluster-id': filters['db-cluster-id']})
cache_key = self.get_cache_key(path)
# false when refresh_cache or --flush-cache is used
if cache:
# get the user-specified directive
cache = self.get_option('cache')
# Generate inventory
formatted_inventory = {}
cache_needs_update = False
if cache:
try:
results = self._cache[cache_key]
except KeyError:
# if cache expires or cache file doesn't exist
cache_needs_update = True
else:
self._populate_from_source(results)
if not cache or cache_needs_update:
results = self._get_all_hosts(regions, instance_filters, cluster_filters, strict_permissions, statuses, include_clusters)
self._populate(results)
formatted_inventory = self._format_inventory(results)
# If the cache has expired/doesn't exist or if refresh_inventory/flush cache is used
# when the user is using caching, update the cached inventory
if cache_needs_update or (not cache and self.get_option('cache')):
self._cache[cache_key] = formatted_inventory

@ -1,131 +0,0 @@
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: aws_account_attribute
author:
- Sloane Hertel <shertel@redhat.com>
version_added: "2.5"
requirements:
- boto3
- botocore
extends_documentation_fragment:
- aws_credentials
- aws_region
short_description: Look up AWS account attributes.
description:
- Describes attributes of your AWS account. You can specify one of the listed
attribute choices or omit it to see all attributes.
options:
attribute:
description: The attribute for which to get the value(s).
choices:
- supported-platforms
- default-vpc
- max-instances
- vpc-max-security-groups-per-interface
- max-elastic-ips
- vpc-max-elastic-ips
- has-ec2-classic
"""
EXAMPLES = """
vars:
has_ec2_classic: "{{ lookup('aws_account_attribute', attribute='has-ec2-classic') }}"
# true | false
default_vpc_id: "{{ lookup('aws_account_attribute', attribute='default-vpc') }}"
# vpc-xxxxxxxx | none
account_details: "{{ lookup('aws_account_attribute', wantlist='true') }}"
# {'default-vpc': ['vpc-xxxxxxxx'], 'max-elastic-ips': ['5'], 'max-instances': ['20'],
# 'supported-platforms': ['VPC', 'EC2'], 'vpc-max-elastic-ips': ['5'], 'vpc-max-security-groups-per-interface': ['5']}
"""
RETURN = """
_raw:
description:
Returns a boolean when I(attribute) is C(has-ec2-classic). Otherwise returns the value(s) of the attribute
(or all attributes if one is not specified).
"""
from ansible.errors import AnsibleError
try:
import boto3
import botocore
except ImportError:
raise AnsibleError("The lookup aws_account_attribute requires boto3 and botocore.")
from ansible.plugins import AnsiblePlugin
from ansible.plugins.lookup import LookupBase
from ansible.module_utils.ec2 import boto3_conn, get_aws_connection_info
from ansible.module_utils._text import to_native
from ansible.module_utils.six import string_types
import os
def _boto3_conn(region, credentials):
boto_profile = credentials.pop('aws_profile', None)
try:
connection = boto3.session.Session(profile_name=boto_profile).client('ec2', region, **credentials)
except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
if boto_profile:
try:
connection = boto3.session.Session(profile_name=boto_profile).client('ec2', region)
except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
raise AnsibleError("Insufficient credentials found.")
else:
raise AnsibleError("Insufficient credentials found.")
return connection
def _get_credentials(options):
credentials = {}
credentials['aws_profile'] = options['aws_profile']
credentials['aws_secret_access_key'] = options['aws_secret_key']
credentials['aws_access_key_id'] = options['aws_access_key']
credentials['aws_session_token'] = options['aws_security_token']
return credentials
class LookupModule(LookupBase):
def run(self, terms, variables, **kwargs):
self.set_options(var_options=variables, direct=kwargs)
boto_credentials = _get_credentials(self._options)
region = self._options['region']
client = _boto3_conn(region, boto_credentials)
attribute = kwargs.get('attribute')
params = {'AttributeNames': []}
check_ec2_classic = False
if 'has-ec2-classic' == attribute:
check_ec2_classic = True
params['AttributeNames'] = ['supported-platforms']
elif attribute:
params['AttributeNames'] = [attribute]
try:
response = client.describe_account_attributes(**params)['AccountAttributes']
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
raise AnsibleError("Failed to describe account attributes: %s" % to_native(e))
if check_ec2_classic:
attr = response[0]
return any(value['AttributeValue'] == 'EC2' for value in attr['AttributeValues'])
if attribute:
attr = response[0]
return [value['AttributeValue'] for value in attr['AttributeValues']]
flattened = {}
for k_v_dict in response:
flattened[k_v_dict['AttributeName']] = [value['AttributeValue'] for value in k_v_dict['AttributeValues']]
return flattened
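# Illustrative shape of the flattened return above (values assumed):
#   {'max-instances': ['20'], 'supported-platforms': ['VPC', 'EC2'], ...}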

@ -1,140 +0,0 @@
# Copyright: (c) 2018, Aaron Smith <ajsmith10381@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = r"""
lookup: aws_secret
author:
- Aaron Smith <ajsmith10381@gmail.com>
version_added: "2.8"
requirements:
- boto3
- botocore>=1.10.0
extends_documentation_fragment:
- aws_credentials
- aws_region
short_description: Look up secrets stored in AWS Secrets Manager.
description:
- Look up secrets stored in AWS Secrets Manager provided the caller
has the appropriate permissions to read the secret.
- Lookup is based on the secret's C(Name) value.
- Optional parameters can be passed into this lookup: C(version_id) and C(version_stage).
options:
_terms:
description: Name of the secret to look up in AWS Secrets Manager.
required: True
version_id:
description: Version of the secret(s).
required: False
version_stage:
description: Stage of the secret version.
required: False
join:
description:
- Join two or more entries to form an extended secret.
- This is useful for overcoming the 4096 character limit imposed by AWS.
type: boolean
default: false
"""
EXAMPLES = r"""
- name: Create RDS instance with aws_secret lookup for password param
rds:
command: create
instance_name: app-db
db_engine: MySQL
size: 10
instance_type: db.m1.small
username: dbadmin
password: "{{ lookup('aws_secret', 'DbSecret') }}"
tags:
Environment: staging
"""
RETURN = r"""
_raw:
description:
Returns the value of the secret stored in AWS Secrets Manager.
"""
from ansible.errors import AnsibleError
try:
import boto3
import botocore
except ImportError:
raise AnsibleError("The lookup aws_secret requires boto3 and botocore.")
from ansible.plugins import AnsiblePlugin
from ansible.plugins.lookup import LookupBase
from ansible.module_utils._text import to_native
def _boto3_conn(region, credentials):
boto_profile = credentials.pop('aws_profile', None)
try:
connection = boto3.session.Session(profile_name=boto_profile).client('secretsmanager', region, **credentials)
except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
if boto_profile:
try:
connection = boto3.session.Session(profile_name=boto_profile).client('secretsmanager', region)
except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
raise AnsibleError("Insufficient credentials found.")
else:
raise AnsibleError("Insufficient credentials found.")
return connection
class LookupModule(LookupBase):
def _get_credentials(self):
credentials = {}
credentials['aws_profile'] = self.get_option('aws_profile')
credentials['aws_secret_access_key'] = self.get_option('aws_secret_key')
credentials['aws_access_key_id'] = self.get_option('aws_access_key')
credentials['aws_session_token'] = self.get_option('aws_security_token')
# fallback to IAM role credentials
if not credentials['aws_profile'] and not (credentials['aws_access_key_id'] and credentials['aws_secret_access_key']):
session = botocore.session.get_session()
if session.get_credentials() is not None:
credentials['aws_access_key_id'] = session.get_credentials().access_key
credentials['aws_secret_access_key'] = session.get_credentials().secret_key
credentials['aws_session_token'] = session.get_credentials().token
return credentials
def run(self, terms, variables, **kwargs):
self.set_options(var_options=variables, direct=kwargs)
boto_credentials = self._get_credentials()
region = self.get_option('region')
client = _boto3_conn(region, boto_credentials)
secrets = []
for term in terms:
params = {}
params['SecretId'] = term
if kwargs.get('version_id'):
params['VersionId'] = kwargs.get('version_id')
if kwargs.get('version_stage'):
params['VersionStage'] = kwargs.get('version_stage')
try:
response = client.get_secret_value(**params)
if 'SecretBinary' in response:
secrets.append(response['SecretBinary'])
if 'SecretString' in response:
secrets.append(response['SecretString'])
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
raise AnsibleError("Failed to retrieve secret: %s" % to_native(e))
if kwargs.get('join'):
joined_secret = []
joined_secret.append(''.join(secrets))
return joined_secret
else:
return secrets
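# Join sketch (secret parts assumed): two terms returning ['part-one', 'part-two']
# with join=true collapse to ['part-onepart-two'], so a secret split across
# several entries to dodge the 4096-character limit comes back as one string.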

@ -1,79 +0,0 @@
# (c) 2016 James Turner <turnerjsm@gmail.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: aws_service_ip_ranges
author:
- James Turner <turnerjsm@gmail.com>
version_added: "2.5"
requirements:
- must have public internet connectivity
short_description: Look up the IP ranges for services provided in AWS such as EC2 and S3.
description:
- AWS publishes IP ranges used on the public internet by EC2, S3, CloudFront, CodeBuild, Route53, and Route53 Health Checking.
- This module produces a list of all the ranges (by default) or can narrow down the list to the specified region or service.
options:
service:
description: 'The service to filter ranges by. Options: EC2, S3, CLOUDFRONT, CODEBUILD, ROUTE53, ROUTE53_HEALTHCHECKS'
region:
description: 'The AWS region to narrow the ranges to. Examples: us-east-1, eu-west-2, ap-southeast-1'
"""
EXAMPLES = """
vars:
ec2_ranges: "{{ lookup('aws_service_ip_ranges', region='ap-southeast-2', service='EC2', wantlist=True) }}"
tasks:
- name: "use list return option and iterate as a loop"
debug: msg="{% for cidr in ec2_ranges %}{{ cidr }} {% endfor %}"
# "52.62.0.0/15 52.64.0.0/17 52.64.128.0/17 52.65.0.0/16 52.95.241.0/24 52.95.255.16/28 54.66.0.0/16 "
- name: "Pull S3 IP ranges, and print the default return style"
debug: msg="{{ lookup('aws_service_ip_ranges', region='us-east-1', service='S3') }}"
# "52.92.16.0/20,52.216.0.0/15,54.231.0.0/17"
"""
RETURN = """
_raw:
description: comma-separated list of CIDR ranges
"""
import json
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
from ansible.module_utils._text import to_native
from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
class LookupModule(LookupBase):
def run(self, terms, variables, **kwargs):
try:
resp = open_url('https://ip-ranges.amazonaws.com/ip-ranges.json')
amazon_response = json.load(resp)['prefixes']
except getattr(json.decoder, 'JSONDecodeError', ValueError) as e:
# on Python 3+, json.decoder.JSONDecodeError is raised for bad
# JSON. On 2.x it's a ValueError
raise AnsibleError("Could not decode AWS IP ranges: %s" % to_native(e))
except HTTPError as e:
raise AnsibleError("Received HTTP error while pulling IP ranges: %s" % to_native(e))
except SSLValidationError as e:
raise AnsibleError("Error validating the server's certificate for: %s" % to_native(e))
except URLError as e:
raise AnsibleError("Failed look up IP range service: %s" % to_native(e))
except ConnectionError as e:
raise AnsibleError("Error connecting to IP range service: %s" % to_native(e))
if 'region' in kwargs:
region = kwargs['region']
amazon_response = (item for item in amazon_response if item['region'] == region)
if 'service' in kwargs:
service = str.upper(kwargs['service'])
amazon_response = (item for item in amazon_response if item['service'] == service)
return [item['ip_prefix'] for item in amazon_response]
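# Filtering sketch: the two generator expressions above chain lazily, so with
# region='us-east-1' and service='S3' only prefixes matching both survive, and
# the final list comprehension materialises just their 'ip_prefix' CIDR blocks.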

@ -1,233 +0,0 @@
# (c) 2016, Bill Wang <ozbillwang(at)gmail.com>
# (c) 2017, Marat Bakeev <hawara(at)gmail.com>
# (c) 2018, Michael De La Rue <siblemitcom.mddlr(at)spamgourmet.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
lookup: aws_ssm
author:
- Bill Wang <ozbillwang(at)gmail.com>
- Marat Bakeev <hawara(at)gmail.com>
- Michael De La Rue <siblemitcom.mddlr@spamgourmet.com>
version_added: 2.5
requirements:
- boto3
- botocore
short_description: Get the value for an SSM parameter or all parameters under a path.
description:
- Get the value for an Amazon Simple Systems Manager parameter or a hierarchy of parameters.
The first argument you pass the lookup can either be a parameter name or a hierarchy of
parameters. Hierarchies start with a forward slash and end with the parameter name. Up to
5 layers may be specified.
- If looking up an explicitly listed parameter by name which does not exist then the lookup will
return a None value which will be interpreted by Jinja2 as an empty string. You can use the
C(default) filter to give a default value in this case, but must set its second parameter to
true (see examples below).
- When looking up a path, a dictionary of the parameters under it will be returned for each path.
If there is no parameter under that path then the return will be successful but the
dictionary will be empty.
- If the lookup fails due to lack of permissions or due to an AWS client error then the aws_ssm
will generate an error, normally crashing the current ansible task. This is normally the right
thing since ignoring a value that IAM isn't giving access to could cause bigger problems and
wrong behaviour or loss of data. If you want to continue in this case then you will have to set
up two ansible tasks, one which sets a variable and ignores failures, and one which uses the value
of that variable with a default. See the examples below.
options:
decrypt:
description: A boolean to indicate whether to decrypt the parameter.
default: true
type: boolean
bypath:
description: A boolean to indicate whether the parameter is provided as a hierarchy.
default: false
type: boolean
recursive:
description: A boolean to indicate whether to retrieve all parameters within a hierarchy.
default: false
type: boolean
shortnames:
description: Indicates whether to return the name only without path if using a parameter hierarchy.
default: false
type: boolean
'''
EXAMPLES = '''
# lookup sample:
- name: lookup ssm parameter store in the current region
debug: msg="{{ lookup('aws_ssm', 'Hello' ) }}"
- name: lookup ssm parameter store in nominated region
debug: msg="{{ lookup('aws_ssm', 'Hello', region='us-east-2' ) }}"
- name: lookup ssm parameter store without decryption
debug: msg="{{ lookup('aws_ssm', 'Hello', decrypt=False ) }}"
- name: lookup ssm parameter store in nominated aws profile
debug: msg="{{ lookup('aws_ssm', 'Hello', aws_profile='myprofile' ) }}"
- name: lookup ssm parameter store using explicit aws credentials
debug: msg="{{ lookup('aws_ssm', 'Hello', aws_access_key=my_aws_access_key, aws_secret_key=my_aws_secret_key, aws_security_token=my_security_token ) }}"
- name: lookup ssm parameter store with all options.
debug: msg="{{ lookup('aws_ssm', 'Hello', decrypt=false, region='us-east-2', aws_profile='myprofile') }}"
- name: lookup a key which doesn't exist, returns ""
debug: msg="{{ lookup('aws_ssm', 'NoKey') }}"
- name: lookup a key which doesn't exist, returning a default ('root')
debug: msg="{{ lookup('aws_ssm', 'AdminID') | default('root', true) }}"
- name: lookup a key which doesn't exist failing to store it in a fact
set_fact:
temp_secret: "{{ lookup('aws_ssm', '/NoAccess/hiddensecret') }}"
ignore_errors: true
- name: show fact default to "access failed" if we don't have access
debug: msg="{{ 'the secret was:' ~ temp_secret | default('could not access secret') }}"
- name: return a dictionary of ssm parameters from a hierarchy path
debug: msg="{{ lookup('aws_ssm', '/PATH/to/params', region='ap-southeast-2', bypath=true, recursive=true ) }}"
- name: return a dictionary of ssm parameters from a hierarchy path with shortened names (param instead of /PATH/to/param)
debug: msg="{{ lookup('aws_ssm', '/PATH/to/params', region='ap-southeast-2', shortnames=true, bypath=true, recursive=true ) }}"
- name: Iterate over a parameter hierarchy (one iteration per parameter)
debug: msg='Key contains {{ item.key }} , with value {{ item.value }}'
loop: '{{ lookup("aws_ssm", "/demo/", region="ap-southeast-2", bypath=True) | dict2items }}'
- name: Iterate over multiple paths as dictionaries (one iteration per path)
debug: msg='Path contains {{ item }}'
loop: '{{ lookup("aws_ssm", "/demo/", "/demo1/", bypath=True)}}'
'''
from ansible.module_utils._text import to_native
from ansible.module_utils.ec2 import HAS_BOTO3, boto3_tag_list_to_ansible_dict
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
from ansible.utils.display import Display
try:
from botocore.exceptions import ClientError
import botocore
import boto3
except ImportError:
pass # will be captured by imported HAS_BOTO3
display = Display()
def _boto3_conn(region, credentials):
if 'boto_profile' in credentials:
boto_profile = credentials.pop('boto_profile')
else:
boto_profile = None
try:
connection = boto3.session.Session(profile_name=boto_profile).client('ssm', region, **credentials)
except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError):
if boto_profile:
try:
connection = boto3.session.Session(profile_name=boto_profile).client('ssm', region)
# FIXME: we should probably do better passing on of the error information
except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError):
raise AnsibleError("Insufficient credentials found.")
else:
raise AnsibleError("Insufficient credentials found.")
return connection
class LookupModule(LookupBase):
def run(self, terms, variables=None, boto_profile=None, aws_profile=None,
aws_secret_key=None, aws_access_key=None, aws_security_token=None, region=None,
bypath=False, shortnames=False, recursive=False, decrypt=True):
'''
:arg terms: a list of lookups to run.
e.g. ['parameter_name', 'parameter_name_too' ]
:kwarg variables: ansible variables active at the time of the lookup
:kwarg aws_secret_key: AWS secret key (matching the access key)
:kwarg aws_access_key: identity of the AWS access key to use
:kwarg aws_security_token: AWS session key if using STS
:kwarg decrypt: Set to True to get decrypted parameters
:kwarg region: AWS region in which to do the lookup
:kwarg bypath: Set to True to do a lookup of variables under a path
:kwarg recursive: Set to True to recurse below the path (requires bypath=True)
:returns: A list of parameter values or a list of dictionaries if bypath=True.
'''
if not HAS_BOTO3:
raise AnsibleError('botocore and boto3 are required for aws_ssm lookup.')
ret = []
response = {}
ssm_dict = {}
credentials = {}
if aws_profile:
credentials['boto_profile'] = aws_profile
else:
credentials['boto_profile'] = boto_profile
credentials['aws_secret_access_key'] = aws_secret_key
credentials['aws_access_key_id'] = aws_access_key
credentials['aws_session_token'] = aws_security_token
client = _boto3_conn(region, credentials)
ssm_dict['WithDecryption'] = decrypt
# Lookup by path
if bypath:
ssm_dict['Recursive'] = recursive
for term in terms:
ssm_dict["Path"] = term
display.vvv("AWS_ssm path lookup term: %s in region: %s" % (term, region))
try:
response = client.get_parameters_by_path(**ssm_dict)
except ClientError as e:
raise AnsibleError("SSM lookup exception: {0}".format(to_native(e)))
paramlist = list()
paramlist.extend(response['Parameters'])
# Manual pagination, since boto doesn't support it yet for get_parameters_by_path
while 'NextToken' in response:
response = client.get_parameters_by_path(NextToken=response['NextToken'], **ssm_dict)
paramlist.extend(response['Parameters'])
# shorten parameter names. yes, this will return duplicate names with different values.
if shortnames:
for x in paramlist:
x['Name'] = x['Name'][x['Name'].rfind('/') + 1:]
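# e.g. '/PATH/to/param' is shortened to 'param'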
display.vvvv("AWS_ssm path lookup returned: %s" % str(paramlist))
if len(paramlist):
ret.append(boto3_tag_list_to_ansible_dict(paramlist,
tag_name_key_name="Name",
tag_value_key_name="Value"))
else:
ret.append({})
# Lookup by parameter name - always returns a list with one or no entry.
else:
display.vvv("AWS_ssm name lookup term: %s" % terms)
ssm_dict["Names"] = terms
try:
response = client.get_parameters(**ssm_dict)
except ClientError as e:
raise AnsibleError("SSM lookup exception: {0}".format(to_native(e)))
params = boto3_tag_list_to_ansible_dict(response['Parameters'], tag_name_key_name="Name",
tag_value_key_name="Value")
for i in terms:
if i.split(':', 1)[0] in params:
ret.append(params[i])
elif i in response['InvalidParameters']:
ret.append(None)
else:
raise AnsibleError("Ansible internal error: aws_ssm lookup failed to understand boto3 return value: {0}".format(str(response)))
return ret
display.vvvv("AWS_ssm path lookup returning: %s " % str(ret))
return ret

@ -1,2 +0,0 @@
cloud/aws
shippable/aws/group2

@ -1,15 +0,0 @@
- name: retrieve caller facts
aws_caller_info:
region: "{{ aws_region }}"
aws_access_key: "{{ aws_access_key }}"
aws_secret_key: "{{ aws_secret_key }}"
security_token: "{{ security_token }}"
register: result
- name: assert correct keys are returned
assert:
that:
- result.account is not none
- result.arn is not none
- result.user_id is not none
- result.account_alias is not none

@ -1,2 +0,0 @@
cloud/aws
shippable/aws/group4

@ -1,3 +0,0 @@
---
# defaults file for s3
bucket_name: '{{ resource_prefix }}'

@ -1,590 +0,0 @@
---
# tasks file for test_s3
- name: set up aws connection info
set_fact:
aws_connection_info: &aws_connection_info
aws_access_key: "{{ aws_access_key }}"
aws_secret_key: "{{ aws_secret_key }}"
security_token: "{{ security_token }}"
region: "{{ aws_region }}"
no_log: yes
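# The &aws_connection_info anchor above is merged into the tasks below via
# '<<: *aws_connection_info', so every aws_s3 call shares the same credentials.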
- block:
- name: Create temporary directory
tempfile:
state: directory
register: tmpdir
- name: Create content
set_fact:
content: "{{ lookup('password', '/dev/null chars=ascii_letters,digits,hexdigits,punctuation') }}"
- name: test create bucket without permissions
aws_s3:
bucket: "{{ bucket_name }}"
mode: create
register: result
ignore_errors: yes
- assert:
that:
- result is failed
- "result.msg != 'MODULE FAILURE'"
- name: test create bucket
aws_s3:
bucket: "{{ bucket_name }}"
mode: create
<<: *aws_connection_info
register: result
- assert:
that:
- result is changed
- name: trying to create a bucket name that already exists
aws_s3:
bucket: "{{ bucket_name }}"
mode: create
<<: *aws_connection_info
register: result
- assert:
that:
- result is not changed
- name: Create local upload.txt
copy:
content: "{{ content }}"
dest: "{{ tmpdir.path }}/upload.txt"
- name: stat the file
stat:
path: "{{ tmpdir.path }}/upload.txt"
get_checksum: yes
register: upload_file
- name: test putting an object in the bucket
aws_s3:
bucket: "{{ bucket_name }}"
mode: put
src: "{{ tmpdir.path }}/upload.txt"
object: delete.txt
<<: *aws_connection_info
retries: 3
delay: 3
register: result
- assert:
that:
- result is changed
- result.msg == "PUT operation complete"
- name: test using aws_s3 with async
aws_s3:
bucket: "{{ bucket_name }}"
mode: put
src: "{{ tmpdir.path }}/upload.txt"
object: delete.txt
<<: *aws_connection_info
register: test_async
async: 30
poll: 0
- name: ensure it completed
async_status:
jid: "{{ test_async.ansible_job_id }}"
register: status
until: status is finished
retries: 10
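# The overwrite modes exercised below: "always" uploads unconditionally,
# "never" skips when the object already exists, and "different" compares
# checksums and only transfers when local and remote content diverge.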
- name: test put with overwrite=different and unmodified object
aws_s3:
bucket: "{{ bucket_name }}"
mode: put
src: "{{ tmpdir.path }}/upload.txt"
object: delete.txt
overwrite: different
<<: *aws_connection_info
retries: 3
delay: 3
register: result
- assert:
that:
- result is not changed
- name: check that roles file lookups work as expected
aws_s3:
bucket: "{{ bucket_name }}"
mode: put
src: hello.txt
object: delete.txt
<<: *aws_connection_info
retries: 3
delay: 3
register: result
- assert:
that:
- result is changed
- result.msg == "PUT operation complete"
- name: test put with overwrite=never
aws_s3:
bucket: "{{ bucket_name }}"
mode: put
src: "{{ tmpdir.path }}/upload.txt"
object: delete.txt
overwrite: never
<<: *aws_connection_info
retries: 3
delay: 3
register: result
- assert:
that:
- result is not changed
- name: test put with overwrite=different and modified object
aws_s3:
bucket: "{{ bucket_name }}"
mode: put
src: "{{ tmpdir.path }}/upload.txt"
object: delete.txt
overwrite: different
<<: *aws_connection_info
retries: 3
delay: 3
register: result
- assert:
that:
- result is changed
- name: test put with overwrite=always
aws_s3:
bucket: "{{ bucket_name }}"
mode: put
src: "{{ tmpdir.path }}/upload.txt"
object: delete.txt
overwrite: always
<<: *aws_connection_info
retries: 3
delay: 3
register: result
- assert:
that:
- result is changed
- name: test get object
aws_s3:
bucket: "{{ bucket_name }}"
mode: get
dest: "{{ tmpdir.path }}/download.txt"
object: delete.txt
<<: *aws_connection_info
retries: 3
delay: 3
register: result
until: "result.msg == 'GET operation complete'"
- name: stat the file so we can compare the checksums
stat:
path: "{{ tmpdir.path }}/download.txt"
get_checksum: yes
register: download_file
- assert:
that:
- upload_file.stat.checksum == download_file.stat.checksum
- name: test get with overwrite=different and identical files
aws_s3:
bucket: "{{ bucket_name }}"
mode: get
dest: "{{ tmpdir.path }}/download.txt"
object: delete.txt
overwrite: different
<<: *aws_connection_info
retries: 3
delay: 3
register: result
- assert:
that:
- result is not changed
- name: modify destination
copy:
dest: "{{ tmpdir.path }}/download.txt"
src: hello.txt
- name: test get with overwrite=never
aws_s3:
bucket: "{{ bucket_name }}"
mode: get
dest: "{{ tmpdir.path }}/download.txt"
object: delete.txt
overwrite: never
<<: *aws_connection_info
retries: 3
delay: 3
register: result
- assert:
that:
- result is not changed
- name: test get with overwrite=different and modified file
aws_s3:
bucket: "{{ bucket_name }}"
mode: get
dest: "{{ tmpdir.path }}/download.txt"
object: delete.txt
overwrite: different
<<: *aws_connection_info
retries: 3
delay: 3
register: result
- assert:
that:
- result is changed
- name: test get with overwrite=always
aws_s3:
bucket: "{{ bucket_name }}"
mode: get
dest: "{{ tmpdir.path }}/download.txt"
object: delete.txt
overwrite: always
<<: *aws_connection_info
retries: 3
delay: 3
register: result
- assert:
that:
- result is changed
- name: test geturl of the object
aws_s3:
bucket: "{{ bucket_name }}"
mode: geturl
object: delete.txt
<<: *aws_connection_info
retries: 3
delay: 3
register: result
until: result is changed
- assert:
that:
- "'Download url:' in result.msg"
- result is changed
- name: test getstr of the object
aws_s3:
bucket: "{{ bucket_name }}"
mode: getstr
object: delete.txt
<<: *aws_connection_info
retries: 3
delay: 3
register: result
- assert:
that:
- result.msg == "GET operation complete"
- result.contents == content
- name: test list to get all objects in the bucket
aws_s3:
bucket: "{{ bucket_name }}"
mode: list
<<: *aws_connection_info
retries: 3
delay: 3
register: result
- assert:
that:
- "'delete.txt' in result.s3_keys"
- result.msg == "LIST operation complete"
- name: test delobj to just delete an object in the bucket
aws_s3:
bucket: "{{ bucket_name }}"
mode: delobj
object: delete.txt
<<: *aws_connection_info
retries: 3
delay: 3
register: result
- assert:
that:
- "'Object deleted from bucket' in result.msg"
- result is changed
- name: test putting an encrypted object in the bucket
aws_s3:
bucket: "{{ bucket_name }}"
mode: put
src: "{{ tmpdir.path }}/upload.txt"
encrypt: yes
object: delete_encrypt.txt
<<: *aws_connection_info
retries: 3
delay: 3
register: result
- assert:
that:
- result is changed
- result.msg == "PUT operation complete"
- name: test get encrypted object
aws_s3:
bucket: "{{ bucket_name }}"
mode: get
dest: "{{ tmpdir.path }}/download_encrypted.txt"
object: delete_encrypt.txt
<<: *aws_connection_info
retries: 3
delay: 3
register: result
until: "result.msg == 'GET operation complete'"
- name: stat the file so we can compare the checksums
stat:
path: "{{ tmpdir.path }}/download_encrypted.txt"
get_checksum: yes
register: download_file
- assert:
that:
- upload_file.stat.checksum == download_file.stat.checksum
- name: delete encrypted file
aws_s3:
bucket: "{{ bucket_name }}"
mode: delobj
object: delete_encrypt.txt
<<: *aws_connection_info
retries: 3
delay: 3
- name: test putting an aws:kms encrypted object in the bucket
aws_s3:
bucket: "{{ bucket_name }}"
mode: put
src: "{{ tmpdir.path }}/upload.txt"
encrypt: yes
encryption_mode: aws:kms
object: delete_encrypt_kms.txt
<<: *aws_connection_info
retries: 3
delay: 3
register: result
- assert:
that:
- result is changed
- result.msg == "PUT operation complete"
- name: test get KMS encrypted object
aws_s3:
bucket: "{{ bucket_name }}"
mode: get
dest: "{{ tmpdir.path }}/download_kms.txt"
object: delete_encrypt_kms.txt
<<: *aws_connection_info
retries: 3
delay: 3
register: result
until: "result.msg == 'GET operation complete'"
- name: get the stat of the file so we can compare the checksums
stat:
path: "{{ tmpdir.path }}/download_kms.txt"
get_checksum: yes
register: download_file
- assert:
that:
- upload_file.stat.checksum == download_file.stat.checksum
# FIXME - could use a test that checks uploaded file is *actually* aws:kms encrypted
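# A sketch of such a check, assuming the awscli package is present on the
# controller: "aws s3api head-object" reports the server-side encryption
# actually applied to the stored object.
- name: check the uploaded object is really aws:kms encrypted
  command: aws s3api head-object --bucket {{ bucket_name }} --key delete_encrypt_kms.txt
  environment:
    AWS_ACCESS_KEY_ID: "{{ aws_access_key }}"
    AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}"
    AWS_SESSION_TOKEN: "{{ security_token }}"
    AWS_DEFAULT_REGION: "{{ aws_region }}"
  register: head_object
- assert:
    that:
      - "'aws:kms' in head_object.stdout"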
- name: delete KMS encrypted file
aws_s3:
bucket: "{{ bucket_name }}"
mode: delobj
object: delete_encrypt_kms.txt
<<: *aws_connection_info
retries: 3
delay: 3
# FIXME: could use a test that checks non standard KMS key
# but that would require ability to create and remove such keys.
# PRs exist for that, but propose deferring until after merge.
- name: test creation of empty path
aws_s3:
bucket: "{{ bucket_name }}"
mode: create
object: foo/bar/baz/
<<: *aws_connection_info
retries: 3
delay: 3
register: result
- assert:
that:
- "'Virtual directory foo/bar/baz/ created' in result.msg"
- result is changed
- name: test deletion of empty path
aws_s3:
bucket: "{{ bucket_name }}"
mode: delobj
object: foo/bar/baz/
<<: *aws_connection_info
retries: 3
delay: 3
- name: test delete bucket
aws_s3:
bucket: "{{ bucket_name }}"
mode: delete
<<: *aws_connection_info
register: result
retries: 3
delay: 3
until: result is changed
- assert:
that:
- result is changed
- name: test create a bucket with a dot in the name
aws_s3:
bucket: "{{ bucket_name + '.bucket' }}"
mode: create
<<: *aws_connection_info
register: result
- assert:
that:
- result is changed
- name: test delete a bucket with a dot in the name
aws_s3:
bucket: "{{ bucket_name + '.bucket' }}"
mode: delete
<<: *aws_connection_info
register: result
- assert:
that:
- result is changed
- name: test delete a nonexistent bucket
aws_s3:
bucket: "{{ bucket_name + '.bucket' }}"
mode: delete
<<: *aws_connection_info
register: result
- assert:
that:
- result is not changed
- name: make a 4 GB tempfile on macOS
command:
_raw_params: "dd if=/dev/zero of={{ tmpdir.path }}/largefile bs=1m count=4096"
when: ansible_distribution == 'MacOSX'
- name: make a 4 GB tempfile on Linux
command:
_raw_params: "dd if=/dev/zero of={{ tmpdir.path }}/largefile bs=1M count=4096"
when: ansible_system == 'Linux'
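# BSD dd on macOS spells the block-size suffix in lower case (bs=1m), while
# GNU dd on Linux expects upper case (bs=1M), hence the two platform tasks.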
- name: test multipart download - platform specific
block:
- name: make a bucket to upload the file
aws_s3:
bucket: "{{ bucket_name }}"
mode: create
<<: *aws_connection_info
- name: upload the file to the bucket
aws_s3:
bucket: "{{ bucket_name }}"
mode: put
src: "{{ tmpdir.path }}/largefile"
object: multipart.txt
<<: *aws_connection_info
- name: download file once
aws_s3:
bucket: "{{ bucket_name }}"
mode: get
dest: "{{ tmpdir.path }}/download.txt"
object: multipart.txt
overwrite: different
<<: *aws_connection_info
retries: 3
delay: 3
until: "result.msg == 'GET operation complete'"
register: result
- assert:
that:
- result is changed
- name: download file again
aws_s3:
bucket: "{{ bucket_name }}"
mode: get
dest: "{{ tmpdir.path }}/download.txt"
object: multipart.txt
overwrite: different
<<: *aws_connection_info
register: result
- assert:
that:
- result is not changed
when: ansible_system == 'Linux' or ansible_distribution == 'MacOSX'
always:
- name: remove uploaded files
aws_s3:
bucket: "{{ bucket_name }}"
mode: delobj
object: "{{ item }}"
<<: *aws_connection_info
loop:
- hello.txt
- delete.txt
- delete_encrypt.txt
- delete_encrypt_kms.txt
ignore_errors: yes
- name: delete temporary files
file:
state: absent
path: "{{ tmpdir.path }}"
ignore_errors: yes
- name: delete the bucket
aws_s3:
bucket: "{{ bucket_name }}"
mode: delete
<<: *aws_connection_info
ignore_errors: yes

@ -1,3 +0,0 @@
cloud/aws
shippable/aws/group2
cloudformation_info

@ -1,8 +0,0 @@
stack_name: "{{ resource_prefix }}"
vpc_name: '{{ resource_prefix }}-vpc'
vpc_seed: '{{ resource_prefix }}'
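# Seeding random() with the resource prefix keeps the generated CIDRs stable
# across replays of one test run while separating concurrent runs.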
vpc_cidr: '10.{{ 256 | random(seed=vpc_seed) }}.0.0/16'
subnet_cidr: '10.{{ 256 | random(seed=vpc_seed) }}.32.0/24'
ec2_ami_name: 'amzn2-ami-hvm-2.*-x86_64-gp2'

@ -1,37 +0,0 @@
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "Create an Amazon EC2 instance.",
"Parameters" : {
"InstanceType" : {
"Description" : "EC2 instance type",
"Type" : "String",
"Default" : "t3.nano",
"AllowedValues" : [ "t3.micro", "t3.nano"]
},
"ImageId" : {
"Type" : "String"
},
"SubnetId" : {
"Type" : "String"
}
},
"Resources" : {
"EC2Instance" : {
"Type" : "AWS::EC2::Instance",
"Properties" : {
"InstanceType" : { "Ref" : "InstanceType" },
"ImageId" : { "Ref" : "ImageId" },
"SubnetId": { "Ref" : "SubnetId" }
}
}
},
"Outputs" : {
"InstanceId" : {
"Value" : { "Ref" : "EC2Instance" }
}
}
}

@ -1,463 +0,0 @@
---
- module_defaults:
group/aws:
aws_access_key: '{{ aws_access_key | default(omit) }}'
aws_secret_key: '{{ aws_secret_key | default(omit) }}'
security_token: '{{ security_token | default(omit) }}'
region: '{{ aws_region | default(omit) }}'
block:
# ==== Env setup ==========================================================
- name: list available AZs
aws_az_info:
register: region_azs
- name: pick an AZ for testing
set_fact:
availability_zone: "{{ region_azs.availability_zones[0].zone_name }}"
- name: Create a test VPC
ec2_vpc_net:
name: "{{ vpc_name }}"
cidr_block: "{{ vpc_cidr }}"
tags:
Name: Cloudformation testing
register: testing_vpc
- name: Create a test subnet
ec2_vpc_subnet:
vpc_id: "{{ testing_vpc.vpc.id }}"
cidr: "{{ subnet_cidr }}"
az: "{{ availability_zone }}"
register: testing_subnet
- name: Find AMI to use
ec2_ami_info:
owners: 'amazon'
filters:
name: '{{ ec2_ami_name }}'
register: ec2_amis
- name: Set fact with latest AMI
vars:
latest_ami: '{{ ec2_amis.images | sort(attribute="creation_date") | last }}'
set_fact:
ec2_ami_image: '{{ latest_ami.image_id }}'
# ==== Cloudformation tests ===============================================
# 1. Basic stack creation (check mode, actual run and idempotency)
# 2. Tags
# 3. cloudformation_info tests (basic + all_facts)
# 4. termination_protection
# 5. create_changeset + changeset_name
# There is still scope to add tests for -
# 1. capabilities
# 2. stack_policy
# 3. on_create_failure (covered in unit tests)
# 4. Passing in a role
# 5. nested stacks?
- name: create a cloudformation stack (check mode)
cloudformation:
stack_name: "{{ stack_name }}"
template_body: "{{ lookup('file','cf_template.json') }}"
template_parameters:
InstanceType: "t3.nano"
ImageId: "{{ ec2_ami_image }}"
SubnetId: "{{ testing_subnet.subnet.id }}"
tags:
Stack: "{{ stack_name }}"
test: "{{ resource_prefix }}"
register: cf_stack
check_mode: yes
- name: check task return attributes
assert:
that:
- cf_stack.changed
- "'msg' in cf_stack and 'New stack would be created' in cf_stack.msg"
- name: create a cloudformation stack
cloudformation:
stack_name: "{{ stack_name }}"
template_body: "{{ lookup('file','cf_template.json') }}"
template_parameters:
InstanceType: "t3.nano"
ImageId: "{{ ec2_ami_image }}"
SubnetId: "{{ testing_subnet.subnet.id }}"
tags:
Stack: "{{ stack_name }}"
test: "{{ resource_prefix }}"
register: cf_stack
- name: check task return attributes
assert:
that:
- cf_stack.changed
- "'events' in cf_stack"
- "'output' in cf_stack and 'Stack CREATE complete' in cf_stack.output"
- "'stack_outputs' in cf_stack and 'InstanceId' in cf_stack.stack_outputs"
- "'stack_resources' in cf_stack"
- name: create a cloudformation stack (check mode) (idempotent)
cloudformation:
stack_name: "{{ stack_name }}"
template_body: "{{ lookup('file','cf_template.json') }}"
template_parameters:
InstanceType: "t3.nano"
ImageId: "{{ ec2_ami_image }}"
SubnetId: "{{ testing_subnet.subnet.id }}"
tags:
Stack: "{{ stack_name }}"
test: "{{ resource_prefix }}"
register: cf_stack
check_mode: yes
- name: check task return attributes
assert:
that:
- not cf_stack.changed
- name: create a cloudformation stack (idempotent)
cloudformation:
stack_name: "{{ stack_name }}"
template_body: "{{ lookup('file','cf_template.json') }}"
template_parameters:
InstanceType: "t3.nano"
ImageId: "{{ ec2_ami_image }}"
SubnetId: "{{ testing_subnet.subnet.id }}"
tags:
Stack: "{{ stack_name }}"
test: "{{ resource_prefix }}"
register: cf_stack
- name: check task return attributes
assert:
that:
- not cf_stack.changed
- "'output' in cf_stack and 'Stack is already up-to-date.' in cf_stack.output"
- "'stack_outputs' in cf_stack and 'InstanceId' in cf_stack.stack_outputs"
- "'stack_resources' in cf_stack"
- name: get stack details
cloudformation_info:
stack_name: "{{ stack_name }}"
register: stack_info
- name: assert stack info
assert:
that:
- "'cloudformation' in stack_info"
- "stack_info.cloudformation | length == 1"
- "stack_name in stack_info.cloudformation"
- "'stack_description' in stack_info.cloudformation[stack_name]"
- "'stack_outputs' in stack_info.cloudformation[stack_name]"
- "'stack_parameters' in stack_info.cloudformation[stack_name]"
- "'stack_tags' in stack_info.cloudformation[stack_name]"
- "stack_info.cloudformation[stack_name].stack_tags.Stack == stack_name"
- name: get stack details (checkmode)
cloudformation_info:
stack_name: "{{ stack_name }}"
register: stack_info
check_mode: yes
- name: assert stack info
assert:
that:
- "'cloudformation' in stack_info"
- "stack_info.cloudformation | length == 1"
- "stack_name in stack_info.cloudformation"
- "'stack_description' in stack_info.cloudformation[stack_name]"
- "'stack_outputs' in stack_info.cloudformation[stack_name]"
- "'stack_parameters' in stack_info.cloudformation[stack_name]"
- "'stack_tags' in stack_info.cloudformation[stack_name]"
- "stack_info.cloudformation[stack_name].stack_tags.Stack == stack_name"
- name: get stack details (all_facts)
cloudformation_info:
stack_name: "{{ stack_name }}"
all_facts: yes
register: stack_info
- name: assert stack info
assert:
that:
- "'stack_events' in stack_info.cloudformation[stack_name]"
- "'stack_policy' in stack_info.cloudformation[stack_name]"
- "'stack_resource_list' in stack_info.cloudformation[stack_name]"
- "'stack_resources' in stack_info.cloudformation[stack_name]"
- "'stack_template' in stack_info.cloudformation[stack_name]"
- name: get stack details (all_facts) (checkmode)
cloudformation_info:
stack_name: "{{ stack_name }}"
all_facts: yes
register: stack_info
check_mode: yes
- name: assert stack info
assert:
that:
- "'stack_events' in stack_info.cloudformation[stack_name]"
- "'stack_policy' in stack_info.cloudformation[stack_name]"
- "'stack_resource_list' in stack_info.cloudformation[stack_name]"
- "'stack_resources' in stack_info.cloudformation[stack_name]"
- "'stack_template' in stack_info.cloudformation[stack_name]"
# ==== Cloudformation tests (create changeset) ============================
# try to create a changeset by changing instance type
- name: create a changeset
cloudformation:
stack_name: "{{ stack_name }}"
create_changeset: yes
changeset_name: "test-changeset"
template_body: "{{ lookup('file','cf_template.json') }}"
template_parameters:
InstanceType: "t3.micro"
ImageId: "{{ ec2_ami_image }}"
SubnetId: "{{ testing_subnet.subnet.id }}"
tags:
Stack: "{{ stack_name }}"
test: "{{ resource_prefix }}"
register: create_changeset_result
- name: assert changeset created
assert:
that:
- "create_changeset_result.changed"
- "'change_set_id' in create_changeset_result"
- "'Stack CREATE_CHANGESET complete' in create_changeset_result.output"
- name: get stack details with changesets
cloudformation_info:
stack_name: "{{ stack_name }}"
stack_change_sets: True
register: stack_info
- name: assert changesets in info
assert:
that:
- "'stack_change_sets' in stack_info.cloudformation[stack_name]"
- name: get stack details with changesets (checkmode)
cloudformation_info:
stack_name: "{{ stack_name }}"
stack_change_sets: True
register: stack_info
check_mode: yes
- name: assert changesets in info
assert:
that:
- "'stack_change_sets' in stack_info.cloudformation[stack_name]"
# try to create an empty changeset by passing in unchanged template
- name: create a changeset
cloudformation:
stack_name: "{{ stack_name }}"
create_changeset: yes
template_body: "{{ lookup('file','cf_template.json') }}"
template_parameters:
InstanceType: "t3.nano"
ImageId: "{{ ec2_ami_image }}"
SubnetId: "{{ testing_subnet.subnet.id }}"
tags:
Stack: "{{ stack_name }}"
test: "{{ resource_prefix }}"
register: create_changeset_result
- name: assert changeset created
assert:
that:
- "not create_changeset_result.changed"
- "'The created Change Set did not contain any changes to this stack and was deleted.' in create_changeset_result.output"
# ==== Cloudformation tests (termination_protection) ======================
- name: set termination protection to true
cloudformation:
stack_name: "{{ stack_name }}"
termination_protection: yes
template_body: "{{ lookup('file','cf_template.json') }}"
template_parameters:
InstanceType: "t3.nano"
ImageId: "{{ ec2_ami_image }}"
SubnetId: "{{ testing_subnet.subnet.id }}"
tags:
Stack: "{{ stack_name }}"
test: "{{ resource_prefix }}"
register: cf_stack
# This fails - #65592
# - name: check task return attributes
# assert:
# that:
# - cf_stack.changed
- name: get stack details
cloudformation_info:
stack_name: "{{ stack_name }}"
register: stack_info
- name: assert stack info
assert:
that:
- "stack_info.cloudformation[stack_name].stack_description.enable_termination_protection"
- name: get stack details (checkmode)
cloudformation_info:
stack_name: "{{ stack_name }}"
register: stack_info
check_mode: yes
- name: assert stack info
assert:
that:
- "stack_info.cloudformation[stack_name].stack_description.enable_termination_protection"
- name: set termination protection to false
cloudformation:
stack_name: "{{ stack_name }}"
termination_protection: no
template_body: "{{ lookup('file','cf_template.json') }}"
template_parameters:
InstanceType: "t3.nano"
ImageId: "{{ ec2_ami_image }}"
SubnetId: "{{ testing_subnet.subnet.id }}"
tags:
Stack: "{{ stack_name }}"
test: "{{ resource_prefix }}"
register: cf_stack
# This fails - #65592
# - name: check task return attributes
# assert:
# that:
# - cf_stack.changed
- name: get stack details
cloudformation_info:
stack_name: "{{ stack_name }}"
register: stack_info
- name: assert stack info
assert:
that:
- "not stack_info.cloudformation[stack_name].stack_description.enable_termination_protection"
- name: get stack details (checkmode)
cloudformation_info:
stack_name: "{{ stack_name }}"
register: stack_info
check_mode: yes
- name: assert stack info
assert:
that:
- "not stack_info.cloudformation[stack_name].stack_description.enable_termination_protection"
# ==== Cloudformation tests (delete stack tests) ==========================
- name: delete cloudformation stack (check mode)
cloudformation:
stack_name: "{{ stack_name }}"
state: absent
check_mode: yes
register: cf_stack
- name: check task return attributes
assert:
that:
- cf_stack.changed
- "'msg' in cf_stack and 'Stack would be deleted' in cf_stack.msg"
- name: delete cloudformation stack
cloudformation:
stack_name: "{{ stack_name }}"
state: absent
register: cf_stack
- name: check task return attributes
assert:
that:
- cf_stack.changed
- "'output' in cf_stack and 'Stack Deleted' in cf_stack.output"
- name: delete cloudformation stack (check mode) (idempotent)
cloudformation:
stack_name: "{{ stack_name }}"
state: absent
check_mode: yes
register: cf_stack
- name: check task return attributes
assert:
that:
- not cf_stack.changed
- "'msg' in cf_stack"
- >-
"Stack doesn't exist" in cf_stack.msg
- name: delete cloudformation stack (idempotent)
cloudformation:
stack_name: "{{ stack_name }}"
state: absent
register: cf_stack
- name: check task return attributes
assert:
that:
- not cf_stack.changed
- "'output' in cf_stack and 'Stack not found.' in cf_stack.output"
- name: get stack details
cloudformation_info:
stack_name: "{{ stack_name }}"
register: stack_info
- name: assert stack info
assert:
that:
- "not stack_info.cloudformation"
- name: get stack details (checkmode)
cloudformation_info:
stack_name: "{{ stack_name }}"
register: stack_info
check_mode: yes
- name: assert stack info
assert:
that:
- "not stack_info.cloudformation"
# ==== Cleanup ============================================================
always:
- name: delete stack
cloudformation:
stack_name: "{{ stack_name }}"
state: absent
ignore_errors: yes
- name: Delete test subnet
ec2_vpc_subnet:
vpc_id: "{{ testing_vpc.vpc.id }}"
cidr: "{{ subnet_cidr }}"
state: absent
ignore_errors: yes
- name: Delete test VPC
ec2_vpc_net:
name: "{{ vpc_name }}"
cidr_block: "{{ vpc_cidr }}"
state: absent
ignore_errors: yes

@ -1,4 +0,0 @@
cloud/aws
shippable/aws/group2
unstable
ec2_ami_info

@ -1,8 +0,0 @@
---
# defaults file for test_ec2_ami
ec2_ami_name: '{{resource_prefix}}'
ec2_ami_description: 'Created by ansible integration tests'
# image for Amazon Linux AMI 2017.03.1 (HVM), SSD Volume Type
ec2_ami_image:
us-east-1: ami-4fffc834
us-east-2: ami-ea87a78f

@ -1,3 +0,0 @@
dependencies:
- prepare_tests
- setup_ec2

@ -1,462 +0,0 @@
---
# tasks file for test_ec2_ami
- block:
# ============================================================
# SETUP: vpc, ec2 key pair, subnet, security group, ec2 instance, snapshot
- name: set aws_connection_info fact
set_fact:
aws_connection_info: &aws_connection_info
aws_region: '{{aws_region}}'
aws_access_key: '{{aws_access_key}}'
aws_secret_key: '{{aws_secret_key}}'
security_token: '{{security_token}}'
no_log: yes
- name: create a VPC to work in
ec2_vpc_net:
cidr_block: 10.0.0.0/24
state: present
name: '{{ ec2_ami_name }}_setup'
resource_tags:
Name: '{{ ec2_ami_name }}_setup'
<<: *aws_connection_info
register: setup_vpc
- name: create a key pair to use for creating an ec2 instance
ec2_key:
name: '{{ ec2_ami_name }}_setup'
state: present
<<: *aws_connection_info
register: setup_key
- name: create a subnet to use for creating an ec2 instance
ec2_vpc_subnet:
az: '{{ ec2_region }}a'
vpc_id: '{{ setup_vpc.vpc.id }}'
cidr: 10.0.0.0/24
state: present
resource_tags:
Name: '{{ ec2_ami_name }}_setup'
<<: *aws_connection_info
register: setup_subnet
- name: create a security group to use for creating an ec2 instance
ec2_group:
name: '{{ ec2_ami_name }}_setup'
description: 'created by Ansible integration tests'
state: present
vpc_id: '{{ setup_vpc.vpc.id }}'
<<: *aws_connection_info
register: setup_sg
- name: provision ec2 instance to create an image
ec2:
key_name: '{{ setup_key.key.name }}'
instance_type: t2.micro
state: present
image: '{{ ec2_region_images[ec2_region] }}'
wait: yes
instance_tags:
'{{ec2_ami_name}}_instance_setup': 'integration_tests'
group_id: '{{ setup_sg.group_id }}'
vpc_subnet_id: '{{ setup_subnet.subnet.id }}'
<<: *aws_connection_info
register: setup_instance
- name: take a snapshot of the instance to create an image
ec2_snapshot:
instance_id: '{{ setup_instance.instance_ids[0] }}'
device_name: /dev/xvda
state: present
<<: *aws_connection_info
register: setup_snapshot
# ============================================================
- name: test clean failure if not providing image_id or name with state=present
ec2_ami:
instance_id: '{{ setup_instance.instance_ids[0] }}'
state: present
description: '{{ ec2_ami_description }}'
tags:
Name: '{{ ec2_ami_name }}_ami'
wait: yes
root_device_name: /dev/xvda
<<: *aws_connection_info
register: result
ignore_errors: yes
- name: assert error message is helpful
assert:
that:
- result.failed
- "result.msg == 'one of the following is required: name, image_id'"
# ============================================================
- name: create an image from the instance
ec2_ami:
instance_id: '{{ setup_instance.instance_ids[0] }}'
state: present
name: '{{ ec2_ami_name }}_ami'
description: '{{ ec2_ami_description }}'
tags:
Name: '{{ ec2_ami_name }}_ami'
wait: yes
root_device_name: /dev/xvda
<<: *aws_connection_info
register: result
- name: set image id fact for deletion later
set_fact:
ec2_ami_image_id: "{{ result.image_id }}"
- name: assert that image has been created
assert:
that:
- "result.changed"
- "result.image_id.startswith('ami-')"
- "'Name' in result.tags and result.tags.Name == ec2_ami_name + '_ami'"
# ============================================================
- name: gather facts about the image created
ec2_ami_info:
image_ids: '{{ ec2_ami_image_id }}'
<<: *aws_connection_info
register: ami_facts_result
ignore_errors: true
- name: assert that the right image was found
assert:
that:
- "ami_facts_result.images[0].image_id == ec2_ami_image_id"
# ============================================================
- name: delete the image
ec2_ami:
instance_id: '{{ setup_instance.instance_ids[0] }}'
state: absent
delete_snapshot: yes
name: '{{ ec2_ami_name }}_ami'
description: '{{ ec2_ami_description }}'
image_id: '{{ result.image_id }}'
tags:
Name: '{{ ec2_ami_name }}_ami'
wait: yes
<<: *aws_connection_info
ignore_errors: true
register: result
- name: assert that the image has been deleted
assert:
that:
- "result.changed"
- "'image_id' not in result"
- "result.snapshots_deleted"
# ============================================================
- name: test removing an ami if no image ID is provided (expected failed=true)
ec2_ami:
state: absent
<<: *aws_connection_info
register: result
ignore_errors: yes
- name: assert that an image ID is required
assert:
that:
- "result.failed"
- "result.msg == 'state is absent but all of the following are missing: image_id'"
# ============================================================
- name: create an image from the snapshot
ec2_ami:
name: '{{ ec2_ami_name }}_ami'
description: '{{ ec2_ami_description }}'
state: present
launch_permissions:
user_ids: []
tags:
Name: '{{ ec2_ami_name }}_ami'
root_device_name: /dev/xvda
device_mapping:
- device_name: /dev/xvda
volume_type: gp2
size: 8
delete_on_termination: true
snapshot_id: '{{ setup_snapshot.snapshot_id }}'
<<: *aws_connection_info
register: result
ignore_errors: true
- name: set image id fact for deletion later
set_fact:
ec2_ami_image_id: "{{ result.image_id }}"
ec2_ami_snapshot: "{{ result.block_device_mapping['/dev/xvda'].snapshot_id }}"
- name: assert a new ami has been created
assert:
that:
- "result.changed"
- "result.image_id.startswith('ami-')"
# ============================================================
- name: test default launch permissions idempotence
ec2_ami:
description: '{{ ec2_ami_description }}'
state: present
name: '{{ ec2_ami_name }}_ami'
tags:
Name: '{{ ec2_ami_name }}_ami'
root_device_name: /dev/xvda
image_id: '{{ result.image_id }}'
launch_permissions:
user_ids: []
device_mapping:
- device_name: /dev/xvda
volume_type: gp2
size: 8
delete_on_termination: true
snapshot_id: '{{ setup_snapshot.snapshot_id }}'
<<: *aws_connection_info
register: result
- name: assert a new ami has not been created
assert:
that:
- "not result.changed"
- "result.image_id.startswith('ami-')"
# ============================================================
- name: add a tag to the AMI
ec2_ami:
state: present
description: '{{ ec2_ami_description }}'
image_id: '{{ result.image_id }}'
name: '{{ ec2_ami_name }}_ami'
tags:
New: Tag
<<: *aws_connection_info
register: result
- name: assert a tag was added
assert:
that:
- "'Name' in result.tags and result.tags.Name == ec2_ami_name + '_ami'"
- "'New' in result.tags and result.tags.New == 'Tag'"
- name: use purge_tags to remove a tag from the AMI
ec2_ami:
state: present
description: '{{ ec2_ami_description }}'
image_id: '{{ result.image_id }}'
name: '{{ ec2_ami_name }}_ami'
tags:
New: Tag
purge_tags: yes
<<: *aws_connection_info
register: result
- name: assert a tag was removed
assert:
that:
- "'Name' not in result.tags"
- "'New' in result.tags and result.tags.New == 'Tag'"
# ============================================================
- name: update AMI launch permissions
ec2_ami:
state: present
image_id: '{{ result.image_id }}'
description: '{{ ec2_ami_description }}'
tags:
Name: '{{ ec2_ami_name }}_ami'
launch_permissions:
group_names: ['all']
<<: *aws_connection_info
register: result
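# granting the special 'all' group launch permissions makes the AMI public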
- name: assert launch permissions were updated
assert:
that:
- "result.changed"
# ============================================================
- name: modify the AMI description
ec2_ami:
state: present
image_id: '{{ result.image_id }}'
name: '{{ ec2_ami_name }}_ami'
description: '{{ ec2_ami_description }}CHANGED'
tags:
Name: '{{ ec2_ami_name }}_ami'
launch_permissions:
group_names: ['all']
<<: *aws_connection_info
register: result
- name: assert the description changed
assert:
that:
- "result.changed"
# ============================================================
- name: remove public launch permissions
ec2_ami:
state: present
image_id: '{{ result.image_id }}'
name: '{{ ec2_ami_name }}_ami'
tags:
Name: '{{ ec2_ami_name }}_ami'
launch_permissions:
group_names: []
<<: *aws_connection_info
register: result
- name: assert launch permissions were updated
assert:
that:
- "result.changed"
# ============================================================
- name: delete ami without deleting the snapshot (default is not to delete)
ec2_ami:
instance_id: '{{ setup_instance.instance_ids[0] }}'
state: absent
name: '{{ ec2_ami_name }}_ami'
image_id: '{{ ec2_ami_image_id }}'
tags:
Name: '{{ ec2_ami_name }}_ami'
wait: yes
<<: *aws_connection_info
ignore_errors: true
register: result
- name: assert that the image has been deleted
assert:
that:
- "result.changed"
- "'image_id' not in result"
- name: ensure the snapshot still exists
ec2_snapshot_info:
snapshot_ids:
- '{{ ec2_ami_snapshot }}'
<<: *aws_connection_info
register: snapshot_result
- name: assert the snapshot wasn't deleted
assert:
that:
- "snapshot_result.snapshots[0].snapshot_id == ec2_ami_snapshot"
- name: delete ami for a second time
ec2_ami:
instance_id: '{{ setup_instance.instance_ids[0] }}'
state: absent
name: '{{ ec2_ami_name }}_ami'
image_id: '{{ ec2_ami_image_id }}'
tags:
Name: '{{ ec2_ami_name }}_ami'
wait: yes
<<: *aws_connection_info
register: result
- name: assert that image does not exist
assert:
that:
- not result.changed
- not result.failed
# ============================================================
always:
# ============================================================
# TEAR DOWN: snapshot, ec2 instance, ec2 key pair, security group, vpc
- name: Announce teardown start
debug:
msg: "***** TESTING COMPLETE. COMMENCE TEARDOWN *****"
- name: delete ami
ec2_ami:
state: absent
image_id: "{{ ec2_ami_image_id }}"
name: '{{ ec2_ami_name }}_ami'
wait: yes
<<: *aws_connection_info
ignore_errors: yes
- name: remove setup snapshot of ec2 instance
ec2_snapshot:
state: absent
snapshot_id: '{{ setup_snapshot.snapshot_id }}'
<<: *aws_connection_info
ignore_errors: yes
- name: remove setup ec2 instance
ec2:
instance_type: t2.micro
instance_ids: '{{ setup_instance.instance_ids }}'
state: absent
wait: yes
instance_tags:
'{{ec2_ami_name}}_instance_setup': 'integration_tests'
group_id: '{{ setup_sg.group_id }}'
vpc_subnet_id: '{{ setup_subnet.subnet.id }}'
<<: *aws_connection_info
ignore_errors: yes
- name: remove setup keypair
ec2_key:
name: '{{ec2_ami_name}}_setup'
state: absent
<<: *aws_connection_info
ignore_errors: yes
- name: remove setup security group
ec2_group:
name: '{{ ec2_ami_name }}_setup'
description: 'created by Ansible integration tests'
state: absent
vpc_id: '{{ setup_vpc.vpc.id }}'
<<: *aws_connection_info
ignore_errors: yes
- name: remove setup subnet
ec2_vpc_subnet:
az: '{{ ec2_region }}a'
vpc_id: '{{ setup_vpc.vpc.id }}'
cidr: 10.0.0.0/24
state: absent
resource_tags:
Name: '{{ ec2_ami_name }}_setup'
<<: *aws_connection_info
ignore_errors: yes
- name: remove setup VPC
ec2_vpc_net:
cidr_block: 10.0.0.0/24
state: absent
name: '{{ ec2_ami_name }}_setup'
resource_tags:
Name: '{{ ec2_ami_name }}_setup'
<<: *aws_connection_info
ignore_errors: yes

@ -1,20 +0,0 @@
---
# vars file for test_ec2_ami
# based on Amazon Linux AMI 2017.09.0 (HVM), SSD Volume Type
ec2_region_images:
us-east-1: ami-8c1be5f6
us-east-2: ami-c5062ba0
us-west-1: ami-02eada62
us-west-2: ami-e689729e
ca-central-1: ami-fd55ec99
eu-west-1: ami-acd005d5
eu-central-1: ami-c7ee5ca8
eu-west-2: ami-1a7f6d7e
ap-southeast-1: ami-0797ea64
ap-southeast-2: ami-8536d6e7
ap-northeast-2: ami-9bec36f5
ap-northeast-1: ami-2a69be4c
ap-south-1: ami-4fc58420
sa-east-1: ami-f1344b9d
cn-north-1: ami-fba67596

@ -1,2 +0,0 @@
cloud/aws
shippable/aws/group2

@ -1,3 +0,0 @@
---
# defaults file for test_ec2_eip
tag_prefix: '{{resource_prefix}}'

@ -1,3 +0,0 @@
dependencies:
- prepare_tests
- setup_ec2

@ -1,425 +0,0 @@
---
# __Test Info__
# Create a self-signed certificate and upload it to AWS
# http://www.akadia.com/services/ssh_test_certificate.html
# http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/ssl-server-cert.html
# __Test Outline__
#
# __ec2_elb_lb__
# create test elb with listeners and certificate
# change AZs
# change listeners
# remove listeners
# remove elb
# __ec2-common__
# test environment variable EC2_REGION
# test with no parameters
# test with only instance_id
# test invalid region parameter
# test valid region parameter
# test invalid ec2_url parameter
# test valid ec2_url parameter
# test credentials from environment
# test credential parameters
- block:
# ============================================================
# create test elb with listeners, certificate, and health check
- name: Create ELB
ec2_elb_lb:
name: "{{ tag_prefix }}"
region: "{{ ec2_region }}"
ec2_access_key: "{{ ec2_access_key }}"
ec2_secret_key: "{{ ec2_secret_key }}"
security_token: "{{ security_token }}"
state: present
zones:
- "{{ ec2_region }}a"
- "{{ ec2_region }}b"
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
- protocol: http
load_balancer_port: 8080
instance_port: 8080
health_check:
ping_protocol: http
ping_port: 80
ping_path: "/index.html"
response_timeout: 5
interval: 30
unhealthy_threshold: 2
healthy_threshold: 10
register: info
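# The ELB API folds ping_protocol, ping_port and ping_path into a single
# health-check target string, asserted below as "HTTP:80/index.html".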
- assert:
that:
- 'info.changed'
- 'info.elb.status == "created"'
- '"{{ ec2_region }}a" in info.elb.zones'
- '"{{ ec2_region }}b" in info.elb.zones'
- 'info.elb.health_check.healthy_threshold == 10'
- 'info.elb.health_check.interval == 30'
- 'info.elb.health_check.target == "HTTP:80/index.html"'
- 'info.elb.health_check.timeout == 5'
- 'info.elb.health_check.unhealthy_threshold == 2'
- '[80, 80, "HTTP", "HTTP"] in info.elb.listeners'
- '[8080, 8080, "HTTP", "HTTP"] in info.elb.listeners'
# ============================================================
# Checking the ports would be useful, but we are at the mercy of AWS
# to start things in a timely manner
#- name: check to make sure 80 is listening
# wait_for: host={{ info.elb.dns_name }} port=80 timeout=600
# register: result
#- name: assert can connect to port#
# assert: 'result.state == "started"'
#- name: check to make sure 443 is listening
# wait_for: host={{ info.elb.dns_name }} port=443 timeout=600
# register: result
#- name: assert can connect to port#
# assert: 'result.state == "started"'
# ============================================================
# Change AZs
- name: Change AZs
ec2_elb_lb:
name: "{{ tag_prefix }}"
region: "{{ ec2_region }}"
ec2_access_key: "{{ ec2_access_key }}"
ec2_secret_key: "{{ ec2_secret_key }}"
security_token: "{{ security_token }}"
state: present
zones:
- "{{ ec2_region }}c"
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
purge_zones: yes
health_check:
ping_protocol: http
ping_port: 80
ping_path: "/index.html"
response_timeout: 5
interval: 30
unhealthy_threshold: 2
healthy_threshold: 10
register: info
- assert:
that:
- 'info.elb.status == "ok"'
- 'info.changed'
- 'info.elb.zones[0] == "{{ ec2_region }}c"'
# ============================================================
# Update AZs
- name: Update AZs
ec2_elb_lb:
name: "{{ tag_prefix }}"
region: "{{ ec2_region }}"
ec2_access_key: "{{ ec2_access_key }}"
ec2_secret_key: "{{ ec2_secret_key }}"
security_token: "{{ security_token }}"
state: present
zones:
- "{{ ec2_region }}a"
- "{{ ec2_region }}b"
- "{{ ec2_region }}c"
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
purge_zones: yes
register: info
- assert:
that:
- 'info.changed'
- 'info.elb.status == "ok"'
- '"{{ ec2_region }}a" in info.elb.zones'
- '"{{ ec2_region }}b" in info.elb.zones'
- '"{{ ec2_region }}c" in info.elb.zones'
# ============================================================
# Purge Listeners
- name: Purge Listeners
ec2_elb_lb:
name: "{{ tag_prefix }}"
region: "{{ ec2_region }}"
ec2_access_key: "{{ ec2_access_key }}"
ec2_secret_key: "{{ ec2_secret_key }}"
security_token: "{{ security_token }}"
state: present
zones:
- "{{ ec2_region }}a"
- "{{ ec2_region }}b"
- "{{ ec2_region }}c"
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 81
purge_listeners: yes
register: info
- assert:
that:
- 'info.elb.status == "ok"'
- 'info.changed'
- '[80, 81, "HTTP", "HTTP"] in info.elb.listeners'
- 'info.elb.listeners|length == 1'
# ============================================================
# add Listeners
- name: Add Listeners
ec2_elb_lb:
name: "{{ tag_prefix }}"
region: "{{ ec2_region }}"
ec2_access_key: "{{ ec2_access_key }}"
ec2_secret_key: "{{ ec2_secret_key }}"
security_token: "{{ security_token }}"
state: present
zones:
- "{{ ec2_region }}a"
- "{{ ec2_region }}b"
- "{{ ec2_region }}c"
listeners:
- protocol: http
load_balancer_port: 8081
instance_port: 8081
purge_listeners: no
register: info
- assert:
that:
- 'info.elb.status == "ok"'
- 'info.changed'
- '[80, 81, "HTTP", "HTTP"] in info.elb.listeners'
- '[8081, 8081, "HTTP", "HTTP"] in info.elb.listeners'
- 'info.elb.listeners|length == 2'
# ============================================================
- name: test with no parameters
ec2_elb_lb:
register: result
ignore_errors: true
- name: assert failure when called with no parameters
assert:
that:
- 'result.failed'
- 'result.msg.startswith("missing required arguments: ")'
# ============================================================
- name: test with only name
ec2_elb_lb:
name="{{ tag_prefix }}"
register: result
ignore_errors: true
- name: assert failure when called with only name
assert:
that:
- 'result.failed'
- 'result.msg == "missing required arguments: state"'
# ============================================================
- name: test invalid region parameter
ec2_elb_lb:
name: "{{ tag_prefix }}"
region: 'asdf querty 1234'
state: present
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
zones:
- "{{ ec2_region }}a"
- "{{ ec2_region }}b"
- "{{ ec2_region }}c"
register: result
ignore_errors: true
- name: assert invalid region parameter
assert:
that:
- 'result.failed'
- 'result.msg.startswith("Region asdf querty 1234 does not seem to be available ")'
# ============================================================
- name: test valid region parameter
ec2_elb_lb:
name: "{{ tag_prefix }}"
region: "{{ ec2_region }}"
state: present
zones:
- "{{ ec2_region }}a"
- "{{ ec2_region }}b"
- "{{ ec2_region }}c"
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
register: result
ignore_errors: true
- name: assert valid region parameter
assert:
that:
- 'result.failed'
- 'result.msg.startswith("No handler was ready to authenticate.")'
# ============================================================
- name: test invalid ec2_url parameter
ec2_elb_lb:
name: "{{ tag_prefix }}"
region: "{{ ec2_region }}"
state: present
zones:
- "{{ ec2_region }}a"
- "{{ ec2_region }}b"
- "{{ ec2_region }}c"
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
environment:
EC2_URL: bogus.example.com
register: result
ignore_errors: true
- name: assert invalid ec2_url parameter
assert:
that:
- 'result.failed'
- 'result.msg.startswith("No handler was ready to authenticate.")'
# ============================================================
- name: test valid ec2_url parameter
ec2_elb_lb:
name: "{{ tag_prefix }}"
region: "{{ ec2_region }}"
state: present
zones:
- "{{ ec2_region }}a"
- "{{ ec2_region }}b"
- "{{ ec2_region }}c"
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
environment:
EC2_URL: '{{ec2_url}}'
register: result
ignore_errors: true
- name: assert valid ec2_url parameter
assert:
that:
- 'result.failed'
- 'result.msg.startswith("No handler was ready to authenticate.")'
# ============================================================
- name: test credentials from environment
ec2_elb_lb:
name: "{{ tag_prefix }}"
region: "{{ ec2_region }}"
state: present
zones:
- "{{ ec2_region }}a"
- "{{ ec2_region }}b"
- "{{ ec2_region }}c"
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
environment:
EC2_ACCESS_KEY: bogus_access_key
EC2_SECRET_KEY: bogus_secret_key
register: result
ignore_errors: true
- name: assert credentials from environment
assert:
that:
- 'result.failed'
- '"InvalidClientTokenId" in result.exception'
# ============================================================
- name: test credential parameters
ec2_elb_lb:
name: "{{ tag_prefix }}"
region: "{{ ec2_region }}"
state: present
zones:
- "{{ ec2_region }}a"
- "{{ ec2_region }}b"
- "{{ ec2_region }}c"
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
register: result
ignore_errors: true
- name: assert credential parameters
assert:
that:
- 'result.failed'
- '"No handler was ready to authenticate. 1 handlers were checked." in result.msg'
always:
# ============================================================
- name: remove the test load balancer completely
ec2_elb_lb:
name: "{{ tag_prefix }}"
region: "{{ ec2_region }}"
state: absent
ec2_access_key: "{{ ec2_access_key }}"
ec2_secret_key: "{{ ec2_secret_key }}"
security_token: "{{ security_token }}"
register: result
- name: assert the load balancer was removed
assert:
that:
- 'result.changed'
- 'result.elb.name == "{{tag_prefix}}"'
- 'result.elb.status == "deleted"'

@ -1,2 +0,0 @@
---
# vars file for test_ec2_elb_lb

@ -1,3 +0,0 @@
cloud/aws
shippable/aws/group2
unstable

@ -1,4 +0,0 @@
---
# defaults file for test_ec2_group
ec2_group_name: '{{resource_prefix}}'
ec2_group_description: 'Created by ansible integration tests'

@ -1,3 +0,0 @@
dependencies:
- prepare_tests
- setup_ec2

@ -1,161 +0,0 @@
---
# A Note about ec2 environment variable name preference:
# - EC2_URL -> AWS_URL
# - EC2_ACCESS_KEY -> AWS_ACCESS_KEY_ID -> AWS_ACCESS_KEY
# - EC2_SECRET_KEY -> AWS_SECRET_ACCESS_KEY -> AWS_SECRET_KEY
# - EC2_REGION -> AWS_REGION
#
# - include: ../../setup_ec2/tasks/common.yml module_name: ec2_group
- block:
# ============================================================
- name: test failure with no parameters
ec2_group:
register: result
ignore_errors: true
- name: assert failure with no parameters
assert:
that:
- 'result.failed'
- 'result.msg == "one of the following is required: name, group_id"'
# ============================================================
- name: test failure with only name
ec2_group:
name: '{{ec2_group_name}}'
register: result
ignore_errors: true
- name: assert failure with only name
assert:
that:
- 'result.failed'
- 'result.msg == "Must provide description when state is present."'
# ============================================================
- name: test failure with only description
ec2_group:
description: '{{ec2_group_description}}'
register: result
ignore_errors: true
- name: assert failure with only description
assert:
that:
- 'result.failed'
- 'result.msg == "one of the following is required: name, group_id"'
# ============================================================
- name: test failure with empty description (the AWS API requires a non-empty description string)
ec2_group:
name: '{{ec2_group_name}}'
description: ''
region: '{{ec2_region}}'
register: result
ignore_errors: true
- name: assert failure with empty description
assert:
that:
- 'result.failed'
- 'result.msg == "Must provide description when state is present."'
# ============================================================
- name: test valid region parameter
ec2_group:
name: '{{ec2_group_name}}'
description: '{{ec2_group_description}}'
region: '{{ec2_region}}'
register: result
ignore_errors: true
- name: assert valid region parameter
assert:
that:
- 'result.failed'
- '"Unable to locate credentials" in result.msg'
# ============================================================
- name: test environment variable EC2_REGION
ec2_group:
name: '{{ec2_group_name}}'
description: '{{ec2_group_description}}'
environment:
EC2_REGION: '{{ec2_region}}'
register: result
ignore_errors: true
- name: assert environment variable EC2_REGION
assert:
that:
- 'result.failed'
- '"Unable to locate credentials" in result.msg'
# ============================================================
- name: test invalid ec2_url parameter
ec2_group:
name: '{{ec2_group_name}}'
description: '{{ec2_group_description}}'
environment:
EC2_URL: bogus.example.com
register: result
ignore_errors: true
- name: assert invalid ec2_url parameter
assert:
that:
- 'result.failed'
- 'result.msg.startswith("The ec2_group module requires a region")'
# ============================================================
- name: test valid ec2_url parameter
ec2_group:
name: '{{ec2_group_name}}'
description: '{{ec2_group_description}}'
environment:
EC2_URL: '{{ec2_url}}'
register: result
ignore_errors: true
- name: assert valid ec2_url parameter
assert:
that:
- 'result.failed'
- 'result.msg.startswith("The ec2_group module requires a region")'
# ============================================================
- name: test credentials from environment
ec2_group:
name: '{{ec2_group_name}}'
description: '{{ec2_group_description}}'
environment:
EC2_REGION: '{{ec2_region}}'
EC2_ACCESS_KEY: bogus_access_key
EC2_SECRET_KEY: bogus_secret_key
register: result
ignore_errors: true
- name: assert ec2_group with valid ec2_url
assert:
that:
- 'result.failed'
- '"validate the provided access credentials" in result.msg'
# ============================================================
- name: test credential parameters
ec2_group:
name: '{{ec2_group_name}}'
description: '{{ec2_group_description}}'
ec2_region: '{{ec2_region}}'
ec2_access_key: 'bogus_access_key'
ec2_secret_key: 'bogus_secret_key'
register: result
ignore_errors: true
- name: assert credential parameters
assert:
that:
- 'result.failed'
- '"validate the provided access credentials" in result.msg'

@ -1,44 +0,0 @@
---
- block:
- name: set up aws connection info
set_fact:
aws_connection_info: &aws_connection_info
aws_access_key: "{{ aws_access_key }}"
aws_secret_key: "{{ aws_secret_key }}"
security_token: "{{ security_token }}"
region: "{{ aws_region }}"
no_log: yes
- name: Create a group with only the default rule
ec2_group:
name: '{{ec2_group_name}}-input-tests'
vpc_id: '{{ vpc_result.vpc.id }}'
description: '{{ec2_group_description}}'
<<: *aws_connection_info
- name: Run through some common weird port specs
ec2_group:
name: '{{ec2_group_name}}-input-tests'
description: '{{ec2_group_description}}'
<<: *aws_connection_info
rules:
- "{{ item }}"
with_items:
- proto: tcp
from_port: "8182"
to_port: 8182
cidr_ipv6: "64:ff9b::/96"
rule_desc: Mixed string and non-string ports
- proto: tcp
ports:
- "9000"
- 9001
- 9002-9005
cidr_ip: "1.2.3.0/24"
always:
- name: tidy up input testing group
ec2_group:
name: '{{ec2_group_name}}-input-tests'
vpc_id: '{{ vpc_result.vpc.id }}'
state: absent
<<: *aws_connection_info
ignore_errors: yes

@ -1,184 +0,0 @@
---
- name: set up aws connection info
set_fact:
aws_connection_info: &aws_connection_info
aws_access_key: "{{ aws_access_key }}"
aws_secret_key: "{{ aws_secret_key }}"
security_token: "{{ security_token }}"
region: "{{ aws_region }}"
no_log: yes
# ============================================================
- name: create a group with a rule (CHECK MODE + DIFF)
ec2_group:
name: '{{ ec2_group_name }}'
description: '{{ ec2_group_description }}'
state: present
rules:
- proto: tcp
from_port: 80
to_port: 80
cidr_ip: 0.0.0.0/0
rules_egress:
- proto: all
cidr_ip: 0.0.0.0/0
<<: *aws_connection_info
register: check_mode_result
check_mode: true
diff: true
- assert:
that:
- check_mode_result.changed
- name: create a group with a rule (DIFF)
ec2_group:
name: '{{ ec2_group_name }}'
description: '{{ ec2_group_description }}'
state: present
rules:
- proto: tcp
from_port: 80
to_port: 80
cidr_ip: 0.0.0.0/0
rules_egress:
- proto: all
cidr_ip: 0.0.0.0/0
<<: *aws_connection_info
register: result
diff: true
- assert:
that:
- result.changed
- result.diff.0.after.ip_permissions == check_mode_result.diff.0.after.ip_permissions
- result.diff.0.after.ip_permissions_egress == check_mode_result.diff.0.after.ip_permissions_egress
- name: add rules to make sorting occur (CHECK MODE + DIFF)
ec2_group:
name: '{{ ec2_group_name }}'
description: '{{ ec2_group_description }}'
state: present
rules:
- proto: tcp
from_port: 80
to_port: 80
cidr_ip: 0.0.0.0/0
- proto: tcp
from_port: 22
to_port: 22
cidr_ip: 20.0.0.0/8
- proto: tcp
from_port: 22
to_port: 22
cidr_ip: 10.0.0.0/8
rules_egress:
- proto: all
cidr_ip: 0.0.0.0/0
<<: *aws_connection_info
register: check_mode_result
check_mode: true
diff: true
- assert:
that:
- check_mode_result.changed
- name: add rules in a different order to test sorting consistency (DIFF)
ec2_group:
name: '{{ ec2_group_name }}'
description: '{{ ec2_group_description }}'
state: present
rules:
- proto: tcp
from_port: 22
to_port: 22
cidr_ip: 20.0.0.0/8
- proto: tcp
from_port: 80
to_port: 80
cidr_ip: 0.0.0.0/0
- proto: tcp
from_port: 22
to_port: 22
cidr_ip: 10.0.0.0/8
rules_egress:
- proto: all
cidr_ip: 0.0.0.0/0
<<: *aws_connection_info
register: result
diff: true
- assert:
that:
- result.changed
- result.diff.0.after.ip_permissions == check_mode_result.diff.0.after.ip_permissions
- result.diff.0.after.ip_permissions_egress == check_mode_result.diff.0.after.ip_permissions_egress
- name: purge rules (CHECK MODE + DIFF)
ec2_group:
name: '{{ ec2_group_name }}'
description: '{{ ec2_group_description }}'
state: present
rules:
- proto: tcp
from_port: 80
to_port: 80
cidr_ip: 0.0.0.0/0
rules_egress: []
<<: *aws_connection_info
register: check_mode_result
check_mode: true
diff: true
- assert:
that:
- check_mode_result.changed
- name: purge rules (DIFF)
ec2_group:
name: '{{ ec2_group_name }}'
description: '{{ ec2_group_description }}'
state: present
rules:
- proto: tcp
from_port: 80
to_port: 80
cidr_ip: 0.0.0.0/0
rules_egress: []
<<: *aws_connection_info
register: result
diff: true
- assert:
that:
- result.changed
- result.diff.0.after.ip_permissions == check_mode_result.diff.0.after.ip_permissions
- result.diff.0.after.ip_permissions_egress == check_mode_result.diff.0.after.ip_permissions_egress
- name: delete the security group (CHECK MODE + DIFF)
ec2_group:
name: '{{ ec2_group_name }}'
state: absent
<<: *aws_connection_info
register: check_mode_result
diff: true
check_mode: true
- assert:
that:
- check_mode_result.changed
- name: delete the security group (DIFF)
ec2_group:
name: '{{ ec2_group_name }}'
state: absent
<<: *aws_connection_info
register: result
diff: true
- assert:
that:
- result.changed
- not result.diff.0.after and not check_mode_result.diff.0.after

@ -1,88 +0,0 @@
- module_defaults:
group/aws:
aws_access_key: "{{ aws_access_key }}"
aws_secret_key: "{{ aws_secret_key }}"
security_token: "{{ security_token }}"
region: "{{ aws_region }}"
block:
- name: Get available AZs
aws_az_facts:
aws_access_key: "{{ aws_connection_info['aws_access_key'] }}"
aws_secret_key: "{{ aws_connection_info['aws_secret_key'] }}"
filters:
region-name: "{{ aws_connection_info['region'] }}"
register: az_facts
- name: Create a classic ELB with classic networking
ec2_elb_lb:
name: "{{ resource_prefix }}-elb"
state: present
zones:
- "{{ az_facts['availability_zones'][0]['zone_name'] }}"
- "{{ az_facts['availability_zones'][1]['zone_name'] }}"
listeners:
- protocol: http # options are http, https, ssl, tcp
load_balancer_port: 80
instance_port: 80
proxy_protocol: True
register: classic_elb
- name: Assert the elb was created
assert:
that:
- classic_elb.changed
- name: Create a security group with a classic elb-sg rule
ec2_group:
name: "{{ resource_prefix }}-sg-a"
description: "EC2 classic test security group"
rules:
- proto: tcp
ports: 80
group_id: amazon-elb/amazon-elb-sg
state: present
register: classic_sg
- name: Assert the SG was created
assert:
that:
- classic_sg.changed
- "{{ classic_sg.ip_permissions | length }} == 1"
- set_fact:
elb_sg_id: "{{ classic_sg.ip_permissions[0].user_id_group_pairs[0].user_id }}/{{ classic_sg.ip_permissions[0].user_id_group_pairs[0].group_id }}/{{ classic_sg.ip_permissions[0].user_id_group_pairs[0].group_name }}"
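# ec2_group references security groups in another account as
# "owner_id/group_id/group_name", the triple assembled above and the same
# format as the literal amazon-elb/amazon-elb-sg rule.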
- name: Update the security group
ec2_group:
name: "{{ resource_prefix }}-sg-a"
description: "EC2 classic test security group"
rules:
- proto: tcp
ports: 8080
group_id: "{{ elb_sg_id }}"
- proto: tcp
ports:
- 80
cidr_ip: 0.0.0.0/0
state: present
register: updated_classic_sg
- name: Assert the SG was updated
assert:
that:
- updated_classic_sg.changed
- "{{ updated_classic_sg.ip_permissions | length }} == 2"
- "{{ classic_sg.ip_permissions[0]}} not in {{ updated_classic_sg.ip_permissions }}"
# ===========================================
always:
- name: Terminate classic ELB
ec2_elb_lb:
name: "{{ resource_prefix }}-classic-elb"
state: absent
- name: Delete security group
ec2_group:
name: "{{ resource_prefix }}-sg-a"
state: absent

@ -1,198 +0,0 @@
---
- block:
- name: set up aws connection info
set_fact:
aws_connection_info: &aws_connection_info
aws_access_key: "{{ aws_access_key }}"
aws_secret_key: "{{ aws_secret_key }}"
security_token: "{{ security_token }}"
region: "{{ aws_region }}"
no_log: yes
- name: Create a group with only the default rule
ec2_group:
name: '{{ec2_group_name}}-egress-tests'
vpc_id: '{{ vpc_result.vpc.id }}'
description: '{{ec2_group_description}}'
<<: *aws_connection_info
state: present
register: result
- name: assert default rule is in place (expected changed=true)
assert:
that:
- result is changed
- result.ip_permissions|length == 0
- result.ip_permissions_egress|length == 1
- result.ip_permissions_egress[0].ip_ranges[0].cidr_ip == '0.0.0.0/0'
- name: Create the same group again without purging egress rules
ec2_group:
name: '{{ec2_group_name}}-egress-tests'
vpc_id: '{{ vpc_result.vpc.id }}'
description: '{{ec2_group_description}}'
purge_rules_egress: false
<<: *aws_connection_info
state: present
register: result
- name: assert default rule is not purged (expected changed=false)
assert:
that:
- result is not changed
- result.ip_permissions|length == 0
- result.ip_permissions_egress|length == 1
- result.ip_permissions_egress[0].ip_ranges[0].cidr_ip == '0.0.0.0/0'
- name: Pass empty egress rules without purging, should leave default rule in place
ec2_group:
name: '{{ec2_group_name}}-egress-tests'
description: '{{ec2_group_description}}'
vpc_id: '{{ vpc_result.vpc.id }}'
purge_rules_egress: false
rules_egress: []
<<: *aws_connection_info
state: present
register: result
- name: assert default rule is not purged (expected changed=false)
assert:
that:
- result is not changed
- result.ip_permissions|length == 0
- result.ip_permissions_egress|length == 1
- result.ip_permissions_egress[0].ip_ranges[0].cidr_ip == '0.0.0.0/0'
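# Only the combination of purge_rules_egress: true and an empty rules_egress
# list removes the implicit allow-all egress rule, as the next task shows.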
- name: Purge rules, including the default
ec2_group:
name: '{{ec2_group_name}}-egress-tests'
description: '{{ec2_group_description}}'
vpc_id: '{{ vpc_result.vpc.id }}'
purge_rules_egress: true
rules_egress: []
<<: *aws_connection_info
state: present
register: result
- name: assert rules are purged, including the default (expected changed=true)
assert:
that:
- result is changed
- result.ip_permissions|length == 0
- result.ip_permissions_egress|length == 0
- name: Add a custom egress rule
ec2_group:
name: '{{ec2_group_name}}-egress-tests'
description: '{{ec2_group_description}}'
vpc_id: '{{ vpc_result.vpc.id }}'
rules_egress:
- proto: tcp
ports:
- 1212
cidr_ip: 1.2.1.2/32
<<: *aws_connection_info
state: present
register: result
- name: assert the first egress rule is present
assert:
that:
- result.ip_permissions_egress|length == 1
- name: Add a second custom egress rule
ec2_group:
name: '{{ec2_group_name}}-egress-tests'
description: '{{ec2_group_description}}'
purge_rules_egress: false
vpc_id: '{{ vpc_result.vpc.id }}'
rules_egress:
- proto: tcp
ports:
- 2323
cidr_ip: 2.3.2.3/32
<<: *aws_connection_info
state: present
register: result
- name: assert the first rule is not purged
assert:
that:
- result.ip_permissions_egress|length == 2
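# In check mode with diff enabled the module should predict the post-purge
# rule set without actually modifying the group.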
- name: Purge the second rule (CHECK MODE) (DIFF MODE)
ec2_group:
name: '{{ec2_group_name}}-egress-tests'
description: '{{ec2_group_description}}'
vpc_id: '{{ vpc_result.vpc.id }}'
rules_egress:
- proto: tcp
ports:
- 1212
cidr_ip: 1.2.1.2/32
<<: *aws_connection_info
state: present
register: result
check_mode: True
diff: True
- name: assert only the first rule would remain
assert:
that:
- result.changed
- result.diff.0.after.ip_permissions_egress|length == 1
- result.diff.0.after.ip_permissions_egress[0].ip_ranges[0].cidr_ip == '1.2.1.2/32'
- name: Purge the second rule
ec2_group:
name: '{{ec2_group_name}}-egress-tests'
description: '{{ec2_group_description}}'
vpc_id: '{{ vpc_result.vpc.id }}'
rules_egress:
- proto: tcp
ports:
- 1212
cidr_ip: 1.2.1.2/32
<<: *aws_connection_info
state: present
register: result
- name: assert only the first rule remains
assert:
that:
- result.ip_permissions_egress|length == 1
- result.ip_permissions_egress[0].ip_ranges[0].cidr_ip == '1.2.1.2/32'
- name: add a rule for all TCP ports
ec2_group:
name: '{{ec2_group_name}}-egress-tests'
description: '{{ec2_group_description}}'
rules_egress:
- proto: tcp
ports: 0-65535
cidr_ip: 0.0.0.0/0
<<: *aws_connection_info
state: present
vpc_id: '{{ vpc_result.vpc.id }}'
register: result
- name: Re-add the default rule
ec2_group:
name: '{{ec2_group_name}}-egress-tests'
description: '{{ec2_group_description}}'
rules_egress:
- proto: -1
cidr_ip: 0.0.0.0/0
<<: *aws_connection_info
state: present
vpc_id: '{{ vpc_result.vpc.id }}'
register: result
always:
- name: tidy up egress rule test security group
ec2_group:
name: '{{ec2_group_name}}-egress-tests'
state: absent
vpc_id: '{{ vpc_result.vpc.id }}'
<<: *aws_connection_info
ignore_errors: yes

@ -1,103 +0,0 @@
---
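# Tests for ec2_group IPv6 support (cidr_ipv6 in rules and rules_egress),
# each scenario run first in check mode and then for real.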
- name: set up aws connection info
set_fact:
aws_connection_info: &aws_connection_info
aws_access_key: "{{ aws_access_key }}"
aws_secret_key: "{{ aws_secret_key }}"
security_token: "{{ security_token }}"
region: "{{ aws_region }}"
no_log: yes
# ============================================================
- name: test state=present for ipv6 (expected changed=true) (CHECK MODE)
ec2_group:
name: '{{ec2_group_name}}'
description: '{{ec2_group_description}}'
<<: *aws_connection_info
state: present
rules:
- proto: "tcp"
from_port: 8182
to_port: 8182
cidr_ipv6: "64:ff9b::/96"
check_mode: true
register: result
- name: assert state=present (expected changed=true)
assert:
that:
- 'result.changed'
# ============================================================
- name: test state=present for ipv6 (expected changed=true)
ec2_group:
name: '{{ec2_group_name}}'
description: '{{ec2_group_description}}'
<<: *aws_connection_info
state: present
rules:
- proto: "tcp"
from_port: 8182
to_port: 8182
cidr_ipv6: "64:ff9b::/96"
register: result
- name: assert state=present (expected changed=true)
assert:
that:
- 'result.changed'
- 'result.group_id.startswith("sg-")'
# ============================================================
- name: test rules_egress state=present for ipv6 (expected changed=true) (CHECK MODE)
ec2_group:
name: '{{ec2_group_name}}'
description: '{{ec2_group_description}}'
<<: *aws_connection_info
state: present
rules:
- proto: "tcp"
from_port: 8182
to_port: 8182
cidr_ipv6: "64:ff9b::/96"
rules_egress:
- proto: "tcp"
from_port: 8181
to_port: 8181
cidr_ipv6: "64:ff9b::/96"
check_mode: true
register: result
- name: assert state=present (expected changed=true)
assert:
that:
- 'result.changed'
# ============================================================
- name: test rules_egress state=present for ipv6 (expected changed=true)
ec2_group:
name: '{{ec2_group_name}}'
description: '{{ec2_group_description}}'
<<: *aws_connection_info
state: present
rules:
- proto: "tcp"
from_port: 8182
to_port: 8182
cidr_ipv6: "64:ff9b::/96"
rules_egress:
- proto: "tcp"
from_port: 8181
to_port: 8181
cidr_ipv6: "64:ff9b::/96"
register: result
- name: assert state=present (expected changed=true)
assert:
that:
- 'result.changed'
- 'result.group_id.startswith("sg-")'
- name: delete it
ec2_group:
name: '{{ec2_group_name}}'
<<: *aws_connection_info
state: absent

File diff suppressed because it is too large

@ -1,124 +0,0 @@
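# Tests for referencing a security group that lives in a peered VPC using the
# owner-id/group-id/group-name form; a wrongly ordered form must fail.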
- block:
- aws_caller_info:
register: caller_facts
- name: create a VPC
ec2_vpc_net:
name: "{{ resource_prefix }}-vpc-2"
state: present
cidr_block: "10.232.233.128/26"
tags:
Description: "Created by ansible-test"
register: vpc_result_2
- name: Peer the secondary-VPC to the main VPC
ec2_vpc_peer:
vpc_id: '{{ vpc_result_2.vpc.id }}'
peer_vpc_id: '{{ vpc_result.vpc.id }}'
peer_owner_id: '{{ caller_facts.account }}'
peer_region: '{{ aws_region }}'
register: peer_origin
- name: Accept the secondary-VPC peering connection in the main VPC
ec2_vpc_peer:
peer_vpc_id: '{{ vpc_result_2.vpc.id }}'
vpc_id: '{{ vpc_result.vpc.id }}'
state: accept
peering_id: '{{ peer_origin.peering_id }}'
peer_owner_id: '{{ caller_facts.account }}'
peer_region: '{{ aws_region }}'
- name: Create group in second VPC
ec2_group:
name: '{{ ec2_group_name }}-external'
description: '{{ ec2_group_description }}'
vpc_id: '{{ vpc_result_2.vpc.id }}'
state: present
rules:
- proto: "tcp"
cidr_ip: 0.0.0.0/0
ports:
- 80
rule_desc: 'http whoo'
register: external
- name: Create group in internal VPC
ec2_group:
name: '{{ ec2_group_name }}-internal'
description: '{{ ec2_group_description }}'
vpc_id: '{{ vpc_result.vpc.id }}'
state: present
rules:
- proto: "tcp"
group_id: '{{ caller_facts.account }}/{{ external.group_id }}/{{ ec2_group_name }}-external'
ports:
- 80
- name: Re-make same rule, expecting changed=false in internal VPC
ec2_group:
name: '{{ ec2_group_name }}-internal'
description: '{{ ec2_group_description }}'
vpc_id: '{{ vpc_result.vpc.id }}'
state: present
rules:
- proto: "tcp"
group_id: '{{ caller_facts.account }}/{{ external.group_id }}/{{ ec2_group_name }}-external'
ports:
- 80
register: out
- assert:
that:
- out is not changed
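# Swapping the owner-id and group-id components produces an invalid
# reference, so the module is expected to fail rather than create a rule.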
- name: Try again with a bad group_id group in internal VPC
ec2_group:
name: '{{ ec2_group_name }}-internal'
description: '{{ ec2_group_description }}'
vpc_id: '{{ vpc_result.vpc.id }}'
state: present
rules:
- proto: "tcp"
group_id: '{{ external.group_id }}/{{ caller_facts.account }}/{{ ec2_group_name }}-external'
ports:
- 80
register: out
ignore_errors: true
- assert:
that:
- out is failed
always:
- pause: seconds=5
- name: Delete secondary-VPC side of peer
ec2_vpc_peer:
vpc_id: '{{ vpc_result_2.vpc.id }}'
peer_vpc_id: '{{ vpc_result.vpc.id }}'
peering_id: '{{ peer_origin.peering_id }}'
state: absent
peer_owner_id: '{{ caller_facts.account }}'
peer_region: '{{ aws_region }}'
ignore_errors: yes
- name: Delete main-VPC side of peer
ec2_vpc_peer:
peer_vpc_id: '{{ vpc_result_2.vpc.id }}'
vpc_id: '{{ vpc_result.vpc.id }}'
state: absent
peering_id: '{{ peer_origin.peering_id }}'
peer_owner_id: '{{ caller_facts.account }}'
peer_region: '{{ aws_region }}'
ignore_errors: yes
- name: Clean up group in second VPC
ec2_group:
name: '{{ ec2_group_name }}-external'
description: '{{ ec2_group_description }}'
state: absent
vpc_id: '{{ vpc_result_2.vpc.id }}'
ignore_errors: yes
- name: Clean up group in main VPC
ec2_group:
name: '{{ ec2_group_name }}-internal'
description: '{{ ec2_group_description }}'
state: absent
vpc_id: '{{ vpc_result.vpc.id }}'
ignore_errors: yes
- name: tidy up VPC
ec2_vpc_net:
name: "{{ resource_prefix }}-vpc-2"
state: absent
cidr_block: "10.232.233.128/26"
ignore_errors: yes
register: removed
retries: 10
until: removed is not failed

@ -1,230 +0,0 @@
---
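# Tests for nested lists of cidr_ip/cidr_ipv6 targets: nested entries are
# flattened into one set of ranges, so re-nesting or reordering them must be
# idempotent while adding or removing entries still reports a change.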
- name: set up aws connection info
set_fact:
aws_connection_info: &aws_connection_info
aws_access_key: "{{ aws_access_key }}"
aws_secret_key: "{{ aws_secret_key }}"
security_token: "{{ security_token }}"
region: "{{ aws_region }}"
no_log: yes
# ============================================================
- name: test state=present for multiple ipv6 and ipv4 targets (expected changed=true) (CHECK MODE)
ec2_group:
name: '{{ ec2_group_name }}'
description: '{{ ec2_group_description }}'
state: present
rules:
- proto: "tcp"
from_port: 8182
to_port: 8182
cidr_ipv6:
- "64:ff9b::/96"
- ["2620::/32"]
- proto: "tcp"
ports: 5665
cidr_ip:
- 172.16.1.0/24
- 172.16.17.0/24
- ["10.0.0.0/24", "20.0.0.0/24"]
<<: *aws_connection_info
check_mode: true
register: result
- name: assert state=present (expected changed=true)
assert:
that:
- 'result.changed'
- name: test state=present for multiple ipv6 and ipv4 targets (expected changed=true)
ec2_group:
name: '{{ ec2_group_name }}'
description: '{{ ec2_group_description }}'
state: present
rules:
- proto: "tcp"
from_port: 8182
to_port: 8182
cidr_ipv6:
- "64:ff9b::/96"
- ["2620::/32"]
- proto: "tcp"
ports: 5665
cidr_ip:
- 172.16.1.0/24
- 172.16.17.0/24
- ["10.0.0.0/24", "20.0.0.0/24"]
<<: *aws_connection_info
register: result
- name: assert state=present (expected changed=true)
assert:
that:
- 'result.changed'
- 'result.ip_permissions | length == 2'
- 'result.ip_permissions[0].ip_ranges | length == 4 or result.ip_permissions[1].ip_ranges | length == 4'
- 'result.ip_permissions[0].ipv6_ranges | length == 2 or result.ip_permissions[1].ipv6_ranges | length == 2'
- name: test state=present for multiple ipv6 and ipv4 targets (expected changed=false) (CHECK MODE)
ec2_group:
name: '{{ ec2_group_name }}'
description: '{{ ec2_group_description }}'
state: present
rules:
- proto: "tcp"
from_port: 8182
to_port: 8182
cidr_ipv6:
- "64:ff9b::/96"
- ["2620::/32"]
- proto: "tcp"
ports: 5665
cidr_ip:
- 172.16.1.0/24
- 172.16.17.0/24
- ["10.0.0.0/24", "20.0.0.0/24"]
<<: *aws_connection_info
check_mode: true
register: result
- name: assert state=present (expected changed=false)
assert:
that:
- 'not result.changed'
- name: test state=present for multiple ipv6 and ipv4 targets (expected changed=false)
ec2_group:
name: '{{ ec2_group_name }}'
description: '{{ ec2_group_description }}'
state: present
rules:
- proto: "tcp"
from_port: 8182
to_port: 8182
cidr_ipv6:
- "64:ff9b::/96"
- ["2620::/32"]
- proto: "tcp"
ports: 5665
cidr_ip:
- 172.16.1.0/24
- 172.16.17.0/24
- ["10.0.0.0/24", "20.0.0.0/24"]
<<: *aws_connection_info
register: result
- name: assert state=present (expected changed=false)
assert:
that:
- 'not result.changed'
- name: test state=present purging a nested ipv4 target (expected changed=true) (CHECK MODE)
ec2_group:
name: '{{ ec2_group_name }}'
description: '{{ ec2_group_description }}'
state: present
rules:
- proto: "tcp"
from_port: 8182
to_port: 8182
cidr_ipv6:
- "64:ff9b::/96"
- ["2620::/32"]
- proto: "tcp"
ports: 5665
cidr_ip:
- 172.16.1.0/24
- 172.16.17.0/24
- ["10.0.0.0/24"]
<<: *aws_connection_info
check_mode: true
register: result
- assert:
that:
- result.changed
- name: test state=present purging a nested ipv4 target (expected changed=true)
ec2_group:
name: '{{ ec2_group_name }}'
description: '{{ ec2_group_description }}'
state: present
rules:
- proto: "tcp"
from_port: 8182
to_port: 8182
cidr_ipv6:
- "64:ff9b::/96"
- ["2620::/32"]
- proto: "tcp"
ports: 5665
cidr_ip:
- 172.16.1.0/24
- 172.16.17.0/24
- ["10.0.0.0/24"]
<<: *aws_connection_info
register: result
- assert:
that:
- result.changed
- 'result.ip_permissions[0].ip_ranges | length == 3 or result.ip_permissions[1].ip_ranges | length == 3'
- 'result.ip_permissions[0].ipv6_ranges | length == 2 or result.ip_permissions[1].ipv6_ranges | length == 2'
- name: test state=present with both associated ipv6 targets nested (expected changed=false)
ec2_group:
name: '{{ ec2_group_name }}'
description: '{{ ec2_group_description }}'
state: present
rules:
- proto: "tcp"
from_port: 8182
to_port: 8182
cidr_ipv6:
- ["2620::/32", "64:ff9b::/96"]
- proto: "tcp"
ports: 5665
cidr_ip:
- 172.16.1.0/24
- 172.16.17.0/24
- ["10.0.0.0/24"]
<<: *aws_connection_info
register: result
- assert:
that:
- not result.changed
- name: test state=present add another nested ipv6 target (expected changed=true)
ec2_group:
name: '{{ ec2_group_name }}'
description: '{{ ec2_group_description }}'
state: present
rules:
- proto: "tcp"
from_port: 8182
to_port: 8182
cidr_ipv6:
- ["2620::/32", "64:ff9b::/96"]
- ["2001:DB8:A0B:12F0::1/64"]
- proto: "tcp"
ports: 5665
cidr_ip:
- 172.16.1.0/24
- 172.16.17.0/24
- ["10.0.0.0/24"]
<<: *aws_connection_info
register: result
- assert:
that:
- result.changed
- 'result.ip_permissions[0].ip_ranges | length == 3 or result.ip_permissions[1].ip_ranges | length == 3'
- 'result.ip_permissions[0].ipv6_ranges | length == 3 or result.ip_permissions[1].ipv6_ranges | length == 3'
- name: delete it
ec2_group:
name: '{{ ec2_group_name }}'
state: absent
<<: *aws_connection_info

@ -1,71 +0,0 @@
---
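# Tests for numbered IP protocols (47 is GRE) and for numeric tag values.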
- block:
- name: set up aws connection info
set_fact:
group_tmp_name: '{{ec2_group_name}}-numbered-protos'
aws_connection_info: &aws_connection_info
aws_access_key: "{{ aws_access_key }}"
aws_secret_key: "{{ aws_secret_key }}"
security_token: "{{ security_token }}"
region: "{{ aws_region }}"
no_log: yes
- name: Create a group with numbered protocol (GRE)
ec2_group:
name: '{{ group_tmp_name }}'
vpc_id: '{{ vpc_result.vpc.id }}'
description: '{{ ec2_group_description }}'
rules:
- proto: 47
to_port: -1
from_port: -1
cidr_ip: 0.0.0.0/0
<<: *aws_connection_info
state: present
register: result
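# The same protocol given as an int and as a quoted string must compare
# equal, so this re-run is expected to report no change.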
- name: Create a group with a quoted proto
ec2_group:
name: '{{ group_tmp_name }}'
vpc_id: '{{ vpc_result.vpc.id }}'
description: '{{ ec2_group_description }}'
rules:
- proto: '47'
to_port: -1
from_port: -1
cidr_ip: 0.0.0.0/0
<<: *aws_connection_info
state: present
register: result
- assert:
that:
- result is not changed
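# AWS stores tag values as strings, so passing the numeric tag again must
# stay idempotent instead of re-applying it as "1".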
- name: Add a tag with a numeric value
ec2_group:
name: '{{ group_tmp_name }}'
vpc_id: '{{ vpc_result.vpc.id }}'
description: '{{ ec2_group_description }}'
tags:
foo: 1
<<: *aws_connection_info
- name: Read a tag with a numeric value
ec2_group:
name: '{{ group_tmp_name }}'
vpc_id: '{{ vpc_result.vpc.id }}'
description: '{{ ec2_group_description }}'
tags:
foo: 1
<<: *aws_connection_info
register: result
- assert:
that:
- result is not changed
always:
- name: tidy up numbered protocol test security group
ec2_group:
name: '{{group_tmp_name}}'
state: absent
vpc_id: '{{ vpc_result.vpc.id }}'
<<: *aws_connection_info
ignore_errors: yes

Some files were not shown because too many files have changed in this diff